Kafka SimpleConsumer example

A complete example of reading messages from a single topic partition with Kafka 0.8's low-level SimpleConsumer API: it locates the partition leader via the topic metadata, fetches messages starting from the earliest offset, and fails over to a replica broker if the leader goes down.
package kafka.simple;

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import kafka.api.FetchRequest;
import kafka.api.FetchRequestBuilder;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.ErrorMapping;
import kafka.common.TopicAndPartition;
import kafka.javaapi.FetchResponse;
import kafka.javaapi.OffsetResponse;
import kafka.javaapi.PartitionMetadata;
import kafka.javaapi.TopicMetadata;
import kafka.javaapi.TopicMetadataRequest;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.message.MessageAndOffset;

public class SimpleExample {

    private List<String> m_replicaBrokers = new ArrayList<String>();

    public SimpleExample() {
        m_replicaBrokers = new ArrayList<String>();
    }

    public static void main(String args[]) {
        SimpleExample example = new SimpleExample();
        // maximum number of messages to read
        long maxReads = Long.parseLong("3");
        // topic to consume from
        String topic = "test";
        // partition to consume from
        int partition = Integer.parseInt("0");
        // broker seed hosts
        List<String> seeds = new ArrayList<String>();
        seeds.add("localhost");
        //seeds.add("192.168.4.31");
        //seeds.add("192.168.4.32");
        // broker port
        int port = Integer.parseInt("9092");
        try {
            example.run(maxReads, topic, partition, seeds, port);
        } catch (Exception e) {
            System.out.println("Oops:" + e);
            e.printStackTrace();
        }
    }

    public void run(long a_maxReads, String a_topic, int a_partition, List<String> a_seedBrokers,
                    int a_port) throws Exception {
        // look up metadata for the given topic partition to find its leader
        PartitionMetadata metadata = findLeader(a_seedBrokers, a_port, a_topic, a_partition);
        if (metadata == null) {
            System.out.println("Can't find metadata for Topic and Partition. Exiting");
            return;
        }
        if (metadata.leader() == null) {
            System.out.println("Can't find Leader for Topic and Partition. Exiting");
            return;
        }
        String leadBroker = metadata.leader().host();
        String clientName = "Client_" + a_topic + "_" + a_partition;

        SimpleConsumer consumer = new SimpleConsumer(leadBroker, a_port, 100000, 64 * 1024, clientName);
        long readOffset = getLastOffset(consumer, a_topic, a_partition,
                kafka.api.OffsetRequest.EarliestTime(), clientName);
        int numErrors = 0;
        while (a_maxReads > 0) {
            if (consumer == null) {
                consumer = new SimpleConsumer(leadBroker, a_port, 100000, 64 * 1024, clientName);
            }
            FetchRequest req = new FetchRequestBuilder().clientId(clientName)
                    .addFetch(a_topic, a_partition, readOffset, 100000).build();
            FetchResponse fetchResponse = consumer.fetch(req);

            if (fetchResponse.hasError()) {
                numErrors++;
                // something went wrong!
                short code = fetchResponse.errorCode(a_topic, a_partition);
                System.out.println("Error fetching data from the Broker:" + leadBroker
                        + " Reason: " + code);
                if (numErrors > 5) {
                    break;
                }
                if (code == ErrorMapping.OffsetOutOfRangeCode()) {
                    // we asked for an invalid offset; for the simple case,
                    // ask for the last element to reset
                    readOffset = getLastOffset(consumer, a_topic, a_partition,
                            kafka.api.OffsetRequest.LatestTime(), clientName);
                    continue;
                }
                consumer.close();
                consumer = null;
                leadBroker = findNewLeader(leadBroker, a_topic, a_partition, a_port);
                continue;
            }
            numErrors = 0;

            long numRead = 0;
            for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(a_topic, a_partition)) {
                long currentOffset = messageAndOffset.offset();
                if (currentOffset < readOffset) {
                    // a fetch on a compressed topic can return messages before the requested offset
                    System.out.println("Found an old offset: " + currentOffset + " Expecting: " + readOffset);
                    continue;
                }
                readOffset = messageAndOffset.nextOffset();
                ByteBuffer payload = messageAndOffset.message().payload();
                byte[] bytes = new byte[payload.limit()];
                payload.get(bytes);
                System.out.println(String.valueOf(messageAndOffset.offset()) + ": " + new String(bytes, "UTF-8"));
                numRead++;
                a_maxReads--;
            }

            if (numRead == 0) {
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException ie) {
                    // ignore and retry the fetch
                }
            }
        }
        if (consumer != null) {
            consumer.close();
        }
    }

    public static long getLastOffset(SimpleConsumer consumer, String topic, int partition,
                                     long whichTime, String clientName) {
        TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
                new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
        requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
        kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(requestInfo,
                kafka.api.OffsetRequest.CurrentVersion(), clientName);
        OffsetResponse response = consumer.getOffsetsBefore(request);

        if (response.hasError()) {
            System.out.println("Error fetching offset data from the Broker. Reason: "
                    + response.errorCode(topic, partition));
            return 0;
        }
        long[] offsets = response.offsets(topic, partition);
        return offsets[0];
    }

    /**
     * Find a new leader broker after the old leader fails.
     *
     * @return the host name of the new leader
     * @throws Exception if no new leader can be found
     */
    private String findNewLeader(String a_oldLeader, String a_topic, int a_partition, int a_port)
            throws Exception {
        for (int i = 0; i < 3; i++) {
            boolean goToSleep = false;
            PartitionMetadata metadata = findLeader(m_replicaBrokers, a_port, a_topic, a_partition);
            if (metadata == null) {
                goToSleep = true;
            } else if (metadata.leader() == null) {
                goToSleep = true;
            } else if (a_oldLeader.equalsIgnoreCase(metadata.leader().host()) && i == 0) {
                // first time through, if the leader hasn't changed, give
                // ZooKeeper a second to recover;
                // second time through, assume the broker recovered before failover,
                // or it was a non-broker issue
                goToSleep = true;
            } else {
                return metadata.leader().host();
            }
            if (goToSleep) {
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException ie) {
                    // ignore and retry the lookup
                }
            }
        }
        System.out.println("Unable to find new leader after Broker failure. Exiting");
        throw new Exception("Unable to find new leader after Broker failure. Exiting");
    }

    private PartitionMetadata findLeader(List<String> a_seedBrokers, int a_port, String a_topic,
                                         int a_partition) {
        PartitionMetadata returnMetaData = null;
        loop:
        for (String seed : a_seedBrokers) {
            SimpleConsumer consumer = null;
            try {
                consumer = new SimpleConsumer(seed, a_port, 100000, 64 * 1024, "leaderLookup");
                List<String> topics = Collections.singletonList(a_topic);
                TopicMetadataRequest req = new TopicMetadataRequest(topics);
                kafka.javaapi.TopicMetadataResponse resp = consumer.send(req);

                List<TopicMetadata> metaData = resp.topicsMetadata();
                for (TopicMetadata item : metaData) {
                    for (PartitionMetadata part : item.partitionsMetadata()) {
                        if (part.partitionId() == a_partition) {
                            returnMetaData = part;
                            break loop;
                        }
                    }
                }
            } catch (Exception e) {
                System.out.println("Error communicating with Broker [" + seed + "] to find Leader for ["
                        + a_topic + ", " + a_partition + "] Reason: " + e);
            } finally {
                if (consumer != null) {
                    consumer.close();
                }
            }
        }
        if (returnMetaData != null) {
            m_replicaBrokers.clear();
            for (kafka.cluster.Broker replica : returnMetaData.replicas()) {
                m_replicaBrokers.add(replica.host());
            }
        }
        return returnMetaData;
    }
}
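
Maven dependency (kafka_2.10 is the old Scala client jar, built against Scala 2.10):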
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.10</artifactId>
<version>0.8.2.1</version>
</dependency>
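
Note that SimpleConsumer is the legacy low-level API; Kafka 0.9+ ships the new org.apache.kafka.clients.consumer.KafkaConsumer, which handles leader discovery, offset management, and failover internally. For comparison, here is a minimal sketch of the same read with the new client. It assumes a broker at localhost:9092, the same topic "test", the kafka-clients jar on the classpath, and an illustrative group id and class name; none of these come from the example above.

import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class NewConsumerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");      // assumed broker address
        props.put("group.id", "test-group");                   // illustrative group id
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        // start from the earliest offset, like EarliestTime() above
        props.put("auto.offset.reset", "earliest");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
        try {
            consumer.subscribe(Collections.singletonList("test"));
            // the client locates partition leaders and fails over by itself
            ConsumerRecords<String, String> records = consumer.poll(1000);
            for (ConsumerRecord<String, String> record : records) {
                System.out.println(record.offset() + ": " + record.value());
            }
        } finally {
            consumer.close();
        }
    }
}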