Storm cluster setup and a Java topology example
Storm cluster setup
Storm configuration is fairly simple.
Installation
tar -zxvf apache-storm-1.2.2.tar.gz
rm apache-storm-1.2.2.tar.gz
mv apache-storm-1.2.2 storm
sudo vim /etc/profile
export STORM_HOME=/usr/local/storm
export PATH=$PATH:$STORM_HOME/bin
source /etc/profile
apt install python
Prepare four machines: master, worker1, worker2, and worker3.
First make sure your ZooKeeper cluster is running properly; worker1, worker2, and worker3 form the ZK ensemble.
For the detailed ZooKeeper setup, see my blog post: https://www.cnblogs.com/ye-hcj/p/9889585.html
Modify the configuration file
storm.yaml
sudo vim storm.yaml
Add the following configuration on all four machines:
storm.zookeeper.servers:
  - "worker1"
  - "worker2"
  - "worker3"
storm.local.dir: "/usr/local/tmpdata/storm"
supervisor.slots.ports:
  - 6700
  - 6701
  - 6702
  - 6703
nimbus.seeds: ["master"]
storm.zookeeper.port: 2181
# Without the following three settings the topology will not start at all
nimbus.childopts: "-Xmx1024m"
supervisor.childopts: "-Xmx1024m"
worker.childopts: "-Xmx768m"
Startup
Run the following on master:
storm nimbus >> /dev/null &
storm ui >/dev/null 2>&1 &
Run the following on worker1, worker2, and worker3:
storm supervisor >/dev/null 2>&1 &
storm logviewer >/dev/null 2>&1 &
Then just open http://master:8080 to reach the Storm UI.
Writing the topology in Java
The project consists of pom.xml plus four Java source files, listed below.
pom.xml
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>test</groupId>
    <artifactId>test</artifactId>
    <version>1.0.0</version>
    <name>test</name>
    <description>Test project for a Storm topology</description>
    <packaging>jar</packaging>

    <properties>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
        <maven.compiler.encoding>UTF-8</maven.compiler.encoding>
        <java.version>1.8</java.version>
        <maven.compiler.source>1.8</maven.compiler.source>
        <maven.compiler.target>1.8</maven.compiler.target>
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.apache.storm</groupId>
            <artifactId>storm-core</artifactId>
            <version>1.2.2</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>3.8.1</version>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <artifactId>maven-assembly-plugin</artifactId>
                <configuration>
                    <descriptorRefs>
                        <descriptorRef>jar-with-dependencies</descriptorRef>
                    </descriptorRefs>
                </configuration>
                <executions>
                    <execution>
                        <id>make-assembly</id>
                        <phase>package</phase>
                        <goals>
                            <goal>single</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
</project>
App.java
package test;

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.utils.Utils;

public class App {

    public static void main(String[] args) throws Exception {
        TopologyBuilder topologyBuilder = new TopologyBuilder();
        topologyBuilder.setSpout("word", new WordSpout(), 1);
        topologyBuilder.setBolt("receive", new RecieveBolt(), 1).shuffleGrouping("word");
        topologyBuilder.setBolt("print", new ConsumeBolt(), 1).shuffleGrouping("receive");

        // Run on the cluster
        Config config = new Config();
        config.setNumWorkers(3);
        config.setDebug(true);
        StormSubmitter.submitTopology("teststorm", config, topologyBuilder.createTopology());

        // Local test
        // Config config = new Config();
        // config.setNumWorkers(3);
        // config.setDebug(true);
        // config.setMaxTaskParallelism(20);
        // LocalCluster cluster = new LocalCluster();
        // cluster.submitTopology("wordCountTopology", config, topologyBuilder.createTopology());
        // Utils.sleep(60000);
        // When finished, shut down the local cluster
        // cluster.shutdown();
    }
}
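A note on the wiring above: shuffleGrouping spreads tuples randomly across bolt tasks. If you raised the parallelism of RecieveBolt and wanted every occurrence of the same word to land on the same task, fieldsGrouping would be the usual choice. Below is a minimal sketch of that variant; the GroupingSketch class name is hypothetical, and it simply reuses the spout and bolts from this example.

package test;

import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.tuple.Fields;

public class GroupingSketch {

    public static TopologyBuilder build() {
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("word", new WordSpout(), 1);
        // fieldsGrouping routes tuples with the same "word" value to the same bolt task
        builder.setBolt("receive", new RecieveBolt(), 2)
               .fieldsGrouping("word", new Fields("word"));
        builder.setBolt("print", new ConsumeBolt(), 1)
               .shuffleGrouping("receive");
        return builder;
    }
}

Only the grouping call and the parallelism hint change; everything else in App.java stays the same.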
WordSpout.java
package test;

import java.util.Map;
import java.util.Random;

import org.apache.storm.spout.SpoutOutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichSpout;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Values;
import org.apache.storm.utils.Utils;

public class WordSpout extends BaseRichSpout {

    private static final long serialVersionUID = 6102239192526611945L;

    private SpoutOutputCollector collector;
    Random random = new Random();

    // Initialize the tuple collector
    public void open(Map conf, TopologyContext topologyContext, SpoutOutputCollector collector) {
        this.collector = collector;
    }

    public void nextTuple() {
        // Simulate a message source
        String[] words = {"iphone", "xiaomi", "mate", "sony", "sumsung", "moto", "meizu"};
        final String word = words[random.nextInt(words.length)];
        // Emit a tuple to the default output stream
        this.collector.emit(new Values(word));
        Utils.sleep(5000);
    }

    // Declare the field name of the emitted tuples
    public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
        outputFieldsDeclarer.declare(new Fields("word"));
    }
}
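This spout emits without a message id, so Storm does not track the tuples and the spout's ack()/fail() callbacks are never invoked. For reference, here is a hedged sketch of a reliable variant that attaches a UUID as the message id; the ReliableWordSpout class name is hypothetical and it keeps the same "word" field as above.

package test;

import java.util.Map;
import java.util.Random;
import java.util.UUID;

import org.apache.storm.spout.SpoutOutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichSpout;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Values;
import org.apache.storm.utils.Utils;

public class ReliableWordSpout extends BaseRichSpout {

    private static final long serialVersionUID = 1L;

    private SpoutOutputCollector collector;
    private final Random random = new Random();

    public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
        this.collector = collector;
    }

    public void nextTuple() {
        String[] words = {"iphone", "xiaomi", "mate", "sony", "sumsung", "moto", "meizu"};
        final String word = words[random.nextInt(words.length)];
        // Passing a message id as the second argument turns on tuple tracking for this emit
        this.collector.emit(new Values(word), UUID.randomUUID().toString());
        Utils.sleep(5000);
    }

    public void ack(Object msgId) {
        // Called when the tuple tree rooted at msgId has been fully processed
    }

    public void fail(Object msgId) {
        // Called on timeout or explicit failure; a real spout would re-emit the word here
    }

    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("word"));
    }
}

A real implementation would keep the pending word keyed by msgId so that fail() can replay it.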
RecieveBolt.java
package test;

import java.util.Map;

import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;

public class RecieveBolt extends BaseRichBolt {

    private static final long serialVersionUID = -4758047349803579486L;

    private OutputCollector collector;

    public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
        this.collector = collector;
    }

    public void execute(Tuple tuple) {
        // Transform the tuple value passed in by the spout
        this.collector.emit(new Values(tuple.getStringByField("word") + "!!!"));
    }

    // Declare the field name of the emitted tuples
    public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
        outputFieldsDeclarer.declare(new Fields("word"));
    }
}
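Because RecieveBolt extends BaseRichBolt and never calls collector.ack(), its input tuples would time out if the spout ever emitted with message ids. A common alternative is BaseBasicBolt, which acks the input automatically after execute() returns. Here is a sketch of that version; the ReceiveBasicBolt class is hypothetical and not part of the original example.

package test;

import org.apache.storm.topology.BasicOutputCollector;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseBasicBolt;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;

public class ReceiveBasicBolt extends BaseBasicBolt {

    private static final long serialVersionUID = 1L;

    public void execute(Tuple tuple, BasicOutputCollector collector) {
        // Emits are anchored to the input and the input is acked automatically
        collector.emit(new Values(tuple.getStringByField("word") + "!!!"));
    }

    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("word"));
    }
}

To try it, you would register it in the builder in place of RecieveBolt.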
ConsumeBolt.java
package test;

import java.io.FileWriter;
import java.io.IOException;
import java.util.Map;
import java.util.UUID;

import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.tuple.Tuple;

public class ConsumeBolt extends BaseRichBolt {

    private static final long serialVersionUID = -7114915627898482737L;

    private FileWriter fileWriter = null;
    private OutputCollector collector;

    public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
        this.collector = collector;
        try {
            fileWriter = new FileWriter("/usr/local/tmpdata/" + UUID.randomUUID());
            // fileWriter = new FileWriter("C:\\Users\\26401\\Desktop\\test\\" + UUID.randomUUID());
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    public void execute(Tuple tuple) {
        try {
            // Append each received word to the output file
            String word = tuple.getStringByField("word") + "......." + "\n";
            fileWriter.write(word);
            fileWriter.flush();
            System.out.println(word);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
    }
}
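One thing worth adding: the FileWriter above is never closed. BaseRichBolt provides an empty cleanup() you can override for that, although Storm only really guarantees it runs in local mode, not when a worker process is killed on the cluster. A sketch of a cleanup() method you could add to the ConsumeBolt class above (it relies on its existing fileWriter field and IOException import):

// Close the output file when the bolt is shut down (reliable only in local mode)
public void cleanup() {
    try {
        if (fileWriter != null) {
            fileWriter.close();
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}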
Running on the cluster
Build the fat jar with mvn package first (the assembly plugin writes test-1.0.0-jar-with-dependencies.jar under target/), then:
storm jar test-1.0.0-jar-with-dependencies.jar test.App   # submit the topology
storm kill teststorm   # kill the topology