Kafka SimpleConsumer Example (SimpleExample)
package kafka.simple;

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import kafka.api.FetchRequest;
import kafka.api.FetchRequestBuilder;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.ErrorMapping;
import kafka.common.TopicAndPartition;
import kafka.javaapi.FetchResponse;
import kafka.javaapi.OffsetResponse;
import kafka.javaapi.PartitionMetadata;
import kafka.javaapi.TopicMetadata;
import kafka.javaapi.TopicMetadataRequest;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.message.MessageAndOffset;

public class SimpleExample {

private List<String> m_replicaBrokers = new ArrayList<String>();

public static void main(String[] args) {
SimpleExample example = new SimpleExample();
// Maximum number of messages to read
long maxReads = 3;
// Topic to subscribe to
String topic = "test";
// Partition to read from
int partition = 0;
// Seed broker hosts
List<String> seeds = new ArrayList<String>();
seeds.add("localhost");
//seeds.add("192.168.4.31");
//seeds.add("192.168.4.32");
// Broker port
int port = 9092;
try {
example.run(maxReads, topic, partition, seeds, port);
} catch (Exception e) {
System.out.println("Oops:" + e);
e.printStackTrace();
}
}

public void run(long a_maxReads, String a_topic, int a_partition, List<String> a_seedBrokers,
int a_port) throws Exception {
// Find the metadata for the given topic and partition
PartitionMetadata metadata = findLeader(a_seedBrokers, a_port, a_topic, a_partition);
if (metadata == null) {
System.out.println("Can't find metadata for Topic and Partition. Exiting");
return;
}
if (metadata.leader() == null) {
System.out.println("Can't find Leader for Topic and Partition. Exiting");
return;
}
String leadBroker = metadata.leader().host();
String clientName = "Client_" + a_topic + "_" + a_partition;
SimpleConsumer consumer = new SimpleConsumer(leadBroker, a_port, 100000, 64 * 1024, clientName);
long readOffset = getLastOffset(consumer, a_topic, a_partition,
kafka.api.OffsetRequest.EarliestTime(), clientName);
int numErrors = 0;
while (a_maxReads > 0) {
if (consumer == null) {
consumer = new SimpleConsumer(leadBroker, a_port, 100000, 64 * 1024, clientName);
}
FetchRequest req = new FetchRequestBuilder().clientId(clientName)
.addFetch(a_topic, a_partition, readOffset, 100000).build();
FetchResponse fetchResponse = consumer.fetch(req);
if (fetchResponse.hasError()) {
numErrors++;
// Something went wrong!
short code = fetchResponse.errorCode(a_topic, a_partition);
System.out
.println("Error fetching data from the Broker:" + leadBroker + " Reason: " + code);
if (numErrors > 5) {
break;
}
if (code == ErrorMapping.OffsetOutOfRangeCode()) {
// We asked for an invalid offset. For simple case ask for
// the last element to reset
readOffset = getLastOffset(consumer, a_topic, a_partition,
kafka.api.OffsetRequest.LatestTime(), clientName);
continue;
}
consumer.close();
consumer = null;
leadBroker = findNewLeader(leadBroker, a_topic, a_partition, a_port);
continue;
}
numErrors = 0;
long numRead = 0;
for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(a_topic, a_partition)) {
long currentOffset = messageAndOffset.offset();
if (currentOffset < readOffset) {
System.out.println("Found an old offset: " + currentOffset + " Expecting: " + readOffset);
continue;
}
readOffset = messageAndOffset.nextOffset();
ByteBuffer payload = messageAndOffset.message().payload();
byte[] bytes = new byte[payload.limit()];
payload.get(bytes);
System.out
.println(String.valueOf(messageAndOffset.offset()) + ": " + new String(bytes, "UTF-8"));
numRead++;
a_maxReads--;
}
if (numRead == 0) {
try {
Thread.sleep(1000);
} catch (InterruptedException ie) {
// Interrupted while idling; fall through and retry the fetch
}
}
}
if (consumer != null) {
consumer.close();
}
}

// Queries the broker for the earliest or latest offset of the partition
public static long getLastOffset(SimpleConsumer consumer, String topic, int partition,
long whichTime, String clientName) {
TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(requestInfo,
kafka.api.OffsetRequest.CurrentVersion(), clientName);
OffsetResponse response = consumer.getOffsetsBefore(request);
if (response.hasError()) {
System.out.println("Error fetching offset data from the broker. Reason: "
+ response.errorCode(topic, partition));
return 0;
}
long[] offsets = response.offsets(topic, partition);
return offsets[0];
}

/**
 * Finds a new leader broker after the current leader fails.
 *
 * @return the host name of the new leader broker
 * @throws Exception if no new leader can be found
 */
private String findNewLeader(String a_oldLeader, String a_topic, int a_partition, int a_port)
throws Exception {
for (int i = 0; i < 3; i++) {
boolean goToSleep = false;
PartitionMetadata metadata = findLeader(m_replicaBrokers, a_port, a_topic, a_partition);
if (metadata == null) {
goToSleep = true;
} else if (metadata.leader() == null) {
goToSleep = true;
} else if (a_oldLeader.equalsIgnoreCase(metadata.leader().host()) && i == 0) {
// First time through, if the leader hasn't changed, give
// ZooKeeper a second to recover.
// Second time, assume the broker did recover before failover,
// or it was a non-broker issue.
goToSleep = true;
} else {
return metadata.leader().host();
}
if (goToSleep) {
try {
Thread.sleep(1000);
} catch (InterruptedException ie) {
// Ignore and retry the leader lookup
}
}
}
System.out.println("Unable to find new leader after Broker failure. Exiting");
throw new Exception("Unable to find new leader after Broker failure. Exiting");
}

// Queries the seed brokers for topic metadata to locate the partition's leader
private PartitionMetadata findLeader(List<String> a_seedBrokers, int a_port, String a_topic,
int a_partition) {
PartitionMetadata returnMetaData = null;
loop:
for (String seed : a_seedBrokers) {
SimpleConsumer consumer = null;
try {
consumer = new SimpleConsumer(seed, a_port, 100000, 64 * 1024, "leaderLookup");
List<String> topics = Collections.singletonList(a_topic);
TopicMetadataRequest req = new TopicMetadataRequest(topics);
kafka.javaapi.TopicMetadataResponse resp = consumer.send(req);
List<TopicMetadata> metaData = resp.topicsMetadata();
for (TopicMetadata item : metaData) {
for (PartitionMetadata part : item.partitionsMetadata()) {
if (part.partitionId() == a_partition) {
returnMetaData = part;
break loop;
}
}
}
} catch (Exception e) {
System.out.println(
"Error communicating with Broker [" + seed + "] to find Leader for [" + a_topic + ", "
+ a_partition + "] Reason: " + e);
} finally {
if (consumer != null) {
consumer.close();
}
}
}
if (returnMetaData != null) {
m_replicaBrokers.clear();
for (kafka.cluster.Broker replica : returnMetaData.replicas()) {
m_replicaBrokers.add(replica.host());
}
}
return returnMetaData;
}
}
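The example is compiled against the old Scala client jar, which is what provides the SimpleConsumer API. Maven dependency: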
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.10</artifactId>
<version>0.8.2.1</version>
</dependency>
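For comparison, the same topic can be consumed with far less code using the high-level org.apache.kafka.clients.consumer.KafkaConsumer introduced in 0.9, which handles leader discovery, failover, and offset management itself. Below is a minimal sketch, assuming the org.apache.kafka:kafka-clients dependency (0.9 or later) and the same test topic on localhost:9092; the class name HighLevelExample and group id test-group are placeholders, not part of the original example.

import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class HighLevelExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        // Hypothetical consumer group id for this sketch
        props.put("group.id", "test-group");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        // Start from the earliest offset when the group has no committed offset,
        // mirroring EarliestTime() in the SimpleConsumer example above
        props.put("auto.offset.reset", "earliest");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
        try {
            // The group coordinator assigns partitions; no manual leader lookup is needed
            consumer.subscribe(Collections.singletonList("test"));
            // Poll a few batches and print each record's offset and value
            for (int i = 0; i < 3; i++) {
                ConsumerRecords<String, String> records = consumer.poll(1000);
                for (ConsumerRecord<String, String> record : records) {
                    System.out.println(record.offset() + ": " + record.value());
                }
            }
        } finally {
            consumer.close();
        }
    }
}

The trade-off is control: SimpleConsumer lets the application read one specific partition from an exact offset and store offsets wherever it likes (for example, in a relational database alongside the processing results), which is the main reason to tolerate the leader-tracking boilerplate in the example above.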