After deleting the existing offsets and restarting, Spark Streaming from Kafka fails with the error "numRecords must not be negative"

The direct stream computes each batch's record count as untilOffset - fromOffset. When the fromOffset restored from ZooKeeper is larger than the partition's actual lastOffset (for example, after the topic was deleted and recreated), that count goes negative and Spark throws "numRecords must not be negative". The workaround is to validate the offsets before creating the stream:

- Fetch the fromOffset of each topic/partition from ZooKeeper (link to the retrieval method; a rough sketch also follows this list)
- Fetch each partition's lastOffset (untilOffset) with SimpleConsumer
- Compare each partition's lastOffset against its fromOffset
- When lastOffset < fromOffset, reset fromOffset to 0
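For step 1, a minimal sketch of reading the committed offsets from ZooKeeper (not necessarily the linked method): it assumes the offsets were written by the old consumer API under the conventional /consumers/&lt;group&gt;/offsets/&lt;topic&gt;/&lt;partition&gt; path, and readFromOffsets, zkHosts and numPartitions are illustrative names, not part of the original post:

import com.google.common.collect.Maps;
import kafka.common.TopicAndPartition;
import org.I0Itec.zkclient.ZkClient;
import org.I0Itec.zkclient.serialize.ZkSerializer;
import java.util.Map;

/** Sketch: read each partition's committed offset from ZooKeeper. */
public static Map<TopicAndPartition, Long> readFromOffsets(String zkHosts, String groupId,
    String topic, int numPartitions) {
  // ZkClient's default serializer uses Java serialization; Kafka stores plain strings,
  // so plug in a trivial string serializer.
  ZkClient zkClient = new ZkClient(zkHosts, 30000, 30000, new ZkSerializer() {
    public byte[] serialize(Object data) { return data.toString().getBytes(); }
    public Object deserialize(byte[] bytes) { return bytes == null ? null : new String(bytes); }
  });
  Map<TopicAndPartition, Long> fromOffsets = Maps.newHashMap();
  try {
    for (int partition = 0; partition < numPartitions; partition++) {
      String path = "/consumers/" + groupId + "/offsets/" + topic + "/" + partition;
      // second argument: return null instead of throwing when the node is missing
      String value = zkClient.readData(path, true);
      fromOffsets.put(new TopicAndPartition(topic, partition),
          value == null ? 0L : Long.parseLong(value));
    }
  } finally {
    zkClient.close();
  }
  return fromOffsets;
}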
Code to fetch the lastOffset of every Kafka topic/partition:
package org.frey.example.utils.kafka;

import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.cluster.Broker;
import kafka.common.TopicAndPartition;
import kafka.javaapi.*;
import kafka.javaapi.consumer.SimpleConsumer;

import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * KafkaOffsetTool
 *
 * @author angel
 * @date 2016/4/11
 */
public class KafkaOffsetTool {

  private static KafkaOffsetTool instance;
  final int TIMEOUT = 100000;
  final int BUFFERSIZE = 64 * 1024;

  private KafkaOffsetTool() {
  }

  public static synchronized KafkaOffsetTool getInstance() {
    if (instance == null) {
      instance = new KafkaOffsetTool();
    }
    return instance;
  }

  public Map<TopicAndPartition, Long> getLastOffset(String brokerList, List<String> topics,
      String groupId) {

    Map<TopicAndPartition, Long> topicAndPartitionLongMap = Maps.newHashMap();

    Map<TopicAndPartition, Broker> topicAndPartitionBrokerMap =
        KafkaOffsetTool.getInstance().findLeader(brokerList, topics);

    for (Map.Entry<TopicAndPartition, Broker> topicAndPartitionBrokerEntry : topicAndPartitionBrokerMap
        .entrySet()) {
      // get leader broker
      Broker leaderBroker = topicAndPartitionBrokerEntry.getValue();

      SimpleConsumer simpleConsumer = new SimpleConsumer(leaderBroker.host(), leaderBroker.port(),
          TIMEOUT, BUFFERSIZE, groupId);

      long readOffset = getTopicAndPartitionLastOffset(simpleConsumer,
          topicAndPartitionBrokerEntry.getKey(), groupId);
      simpleConsumer.close(); // release the connection to this leader

      topicAndPartitionLongMap.put(topicAndPartitionBrokerEntry.getKey(), readOffset);
    }

    return topicAndPartitionLongMap;
  }

  /**
   * Find the leader broker of every TopicAndPartition.
   *
   * @param brokerList broker list, e.g. "broker001:9092,broker002:9092"
   * @param topics     topics to look up
   * @return map of each TopicAndPartition to its leader Broker
   */
  private Map<TopicAndPartition, Broker> findLeader(String brokerList, List<String> topics) {
    // get broker host array
    String[] brokerUrlArray = getBrokerUrlFromBrokerList(brokerList);
    // get broker port map
    Map<String, Integer> brokerPortMap = getPortFromBrokerList(brokerList);

    // map of TopicAndPartition to leader Broker
    Map<TopicAndPartition, Broker> topicAndPartitionBrokerMap = Maps.newHashMap();

    for (String broker : brokerUrlArray) {
      SimpleConsumer consumer = null;
      try {
        // new instance of SimpleConsumer for the metadata lookup
        consumer = new SimpleConsumer(broker, brokerPortMap.get(broker), TIMEOUT, BUFFERSIZE,
            "leaderLookup" + new Date().getTime());
        TopicMetadataRequest req = new TopicMetadataRequest(topics);
        TopicMetadataResponse resp = consumer.send(req);
        List<TopicMetadata> metaData = resp.topicsMetadata();
        for (TopicMetadata item : metaData) {
          for (PartitionMetadata part : item.partitionsMetadata()) {
            TopicAndPartition topicAndPartition =
                new TopicAndPartition(item.topic(), part.partitionId());
            topicAndPartitionBrokerMap.put(topicAndPartition, part.leader());
          }
        }
      } catch (Exception e) {
        e.printStackTrace();
      } finally {
        if (consumer != null)
          consumer.close();
      }
    }
    return topicAndPartitionBrokerMap;
  }

  /**
   * Get the last offset of one TopicAndPartition.
   *
   * @param consumer          SimpleConsumer connected to the partition's leader
   * @param topicAndPartition the topic/partition to query
   * @param clientName        client id sent with the request
   * @return the partition's last offset, or 0 on error
   */
  private long getTopicAndPartitionLastOffset(SimpleConsumer consumer,
      TopicAndPartition topicAndPartition, String clientName) {
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
        new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();

    requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(
        kafka.api.OffsetRequest.LatestTime(), 1));

    OffsetRequest request = new OffsetRequest(
        requestInfo, kafka.api.OffsetRequest.CurrentVersion(),
        clientName);

    OffsetResponse response = consumer.getOffsetsBefore(request);

    if (response.hasError()) {
      System.out
          .println("Error fetching offset data from the broker. Reason: "
              + response.errorCode(topicAndPartition.topic(), topicAndPartition.partition()));
      return 0;
    }
    long[] offsets = response.offsets(topicAndPartition.topic(), topicAndPartition.partition());
    return offsets[0];
  }

  /**
   * Get all broker hosts from the broker list.
   *
   * @param brokerlist broker list, e.g. "broker001:9092,broker002:9092"
   * @return array of broker hosts
   */
  private String[] getBrokerUrlFromBrokerList(String brokerlist) {
    String[] brokers = brokerlist.split(",");
    for (int i = 0; i < brokers.length; i++) {
      brokers[i] = brokers[i].split(":")[0];
    }
    return brokers;
  }

  /**
   * Map each broker host to its port.
   *
   * @param brokerlist broker list, e.g. "broker001:9092,broker002:9092"
   * @return map of broker host to port
   */
  private Map<String, Integer> getPortFromBrokerList(String brokerlist) {
    Map<String, Integer> map = new HashMap<String, Integer>();
    String[] brokers = brokerlist.split(",");
    for (String item : brokers) {
      String[] itemArr = item.split(":");
      if (itemArr.length > 1) {
        map.put(itemArr[0], Integer.parseInt(itemArr[1]));
      }
    }
    return map;
  }

  public static void main(String[] args) {
    List<String> topics = Lists.newArrayList();
    topics.add("ys");
    topics.add("bugfix");
    Map<TopicAndPartition, Long> topicAndPartitionLongMap =
        KafkaOffsetTool.getInstance().getLastOffset("broker001:9092,broker002:9092", topics,
            "my.group.id");
    for (Map.Entry<TopicAndPartition, Long> entry : topicAndPartitionLongMap.entrySet()) {
      System.out.println(entry.getKey().topic() + "-" + entry.getKey().partition() + ":"
          + entry.getValue());
    }
  }
}

Core code for correcting the offsets:
/** offset correction starts here */
// get the lastOffset of every topic/partition
Map<TopicAndPartition, Long> topicAndPartitionLongMap =
    KafkaOffsetTool.getInstance().getLastOffset(kafkaParams.get("metadata.broker.list"),
        topicList, "my.group.id");

// iterate over every topic/partition in fromOffsets
for (Map.Entry<TopicAndPartition, Long> topicAndPartitionLongEntry : fromOffsets.entrySet()) {
  // when fromOffset > lastOffset
  if (topicAndPartitionLongEntry.getValue() >
      topicAndPartitionLongMap.get(topicAndPartitionLongEntry.getKey())) {
    // reset fromOffset to the initial offset 0
    topicAndPartitionLongEntry.setValue(0L);
  }
}
/** offset correction ends here */
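Once corrected, fromOffsets can be passed straight to the direct stream. A minimal sketch of the wiring, assuming the Spark 1.x Java API, a JavaStreamingContext named jssc, and String keys/values; kafkaParams and fromOffsets are the maps from above:

import kafka.message.MessageAndMetadata;
import kafka.serializer.StringDecoder;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.streaming.api.java.JavaInputDStream;
import org.apache.spark.streaming.kafka.KafkaUtils;

JavaInputDStream<String> stream = KafkaUtils.createDirectStream(
    jssc,
    String.class, String.class,
    StringDecoder.class, StringDecoder.class,
    String.class,
    kafkaParams,
    fromOffsets,
    // keep only the message payload of each record
    new Function<MessageAndMetadata<String, String>, String>() {
      public String call(MessageAndMetadata<String, String> msg) {
        return msg.message();
      }
    });

One caveat: resetting to 0 assumes offset 0 still exists on the broker. If log retention has already deleted the oldest segments, a safer reset value is the partition's earliest available offset, obtained with the same OffsetRequest as above but with kafka.api.OffsetRequest.EarliestTime() in place of LatestTime().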