Connecting to Kafka with the Java client: KafkaProducer and KafkaConsumer

Note: since version 0.10, connecting to Kafka only requires the broker addresses; ZooKeeper connection info is no longer needed.

1. Kafka configuration

{
    "producer": {
        "bootstrap.servers": "10.183.93.127:9093,10.183.93.128:9093,10.183.93.130:9093",
        "key.serializer": "org.apache.kafka.common.serialization.StringSerializer",
        "value.serializer": "org.apache.kafka.common.serialization.StringSerializer",
        "max.request.size": "",
        "batch.size": "",
        "buffer.memory": "",
        "max.block.ms": "",
        "retries": "",
        "acks": ""
    },
    "consumer": {
        "bootstrap.servers": "10.183.93.127:9093,10.183.93.128:9093,10.183.93.130:9093",
        "group.id": "test222",
        "session.timeout.ms": "",
        "key.deserializer": "org.apache.kafka.common.serialization.StringDeserializer",
        "value.deserializer": "org.apache.kafka.common.serialization.StringDeserializer"
    }
}
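
The blank values above were left unfilled in the original; the Java client rejects empty strings for numeric settings, so they must be filled in or removed before use. A sketch with plausible values, expressed as Properties so each can carry a note (the numbers are the client defaults where one exists, purely illustrative):

import java.util.Properties;

public class ProducerConfigSketch {
    public static Properties producerProps() {
        Properties p = new Properties();
        p.put("bootstrap.servers", "10.183.93.127:9093,10.183.93.128:9093,10.183.93.130:9093");
        p.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        p.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        p.put("max.request.size", "1048576"); // largest single request; client default is 1 MB
        p.put("batch.size", "16384");         // per-partition batch buffer; default 16 KB
        p.put("buffer.memory", "33554432");   // total producer-side buffer; default 32 MB
        p.put("max.block.ms", "60000");       // max time send() may block; default 60 s
        p.put("retries", "3");                // retries for transient failures (default is 0)
        p.put("acks", "all");                 // wait for all in-sync replicas
        return p;
    }
}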

2. KafkaUtils, for reading the Kafka configuration

package com.wenbronk.kafka;

import com.alibaba.fastjson.JSON;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import org.junit.Test;

import java.io.FileNotFoundException;
import java.io.FileReader;
import java.util.Map;
import java.util.Properties;

public class KafkaUtils {

    @Test
    public void test() throws FileNotFoundException {
        getConfig("producer");
        // fastJSON();
    }

    // read the named section ("producer" or "consumer") from the JSON config file
    public static JsonObject getConfig(String name) throws FileNotFoundException {
        JsonParser parser = new JsonParser();
        JsonElement parse = parser.parse(new FileReader("src/main/resources/kafka"));
        JsonObject jsonObject = parse.getAsJsonObject().getAsJsonObject(name);
        System.out.println(jsonObject);
        return jsonObject;
    }

    // turn a config section into java.util.Properties for the Kafka client
    public static Properties getProperties(String sourceName) throws FileNotFoundException {
        JsonObject config = KafkaUtils.getConfig(sourceName);
        Properties properties = new Properties();
        for (Map.Entry<String, JsonElement> entry : config.entrySet()) {
            properties.put(entry.getKey(), entry.getValue().getAsString());
        }
        return properties;
    }

    // public static void fastJSON() throws FileNotFoundException {
    //     Object o = JSON.toJSON(new FileReader("src/main/resources/kafka"));
    //     System.out.println(o);
    // }
}
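
If your Gson version is 2.8.6 or newer, the instance parse method used above is deprecated in favor of the static JsonParser.parseReader; a minimal equivalent sketch under that assumption (same imports as the class above):

// Gson 2.8.6+: static parseReader replaces the deprecated instance parse()
JsonElement parsed = JsonParser.parseReader(new FileReader("src/main/resources/kafka"));
JsonObject producerConfig = parsed.getAsJsonObject().getAsJsonObject("producer");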

3. Kafka producer

package com.wenbronk.kafka;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.junit.Test;

import java.io.FileNotFoundException;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

/**
 * Message producer
 */
public class KafkaProducerMain {

    @Test
    public void send() throws Exception {
        HashMap<String, String> map = new HashMap<>();
        map.put("http_zhixin", "send message to kafka from producer");
        // the loop bounds were blank in the original; 10 iterations is illustrative
        for (int i = 0; i < 10; i++) {
            sendMessage(map);
        }
    }

    /**
     * Send one message per (topic, message) entry
     */
    public void sendMessage(Map<String, String> topicMsg) throws FileNotFoundException {
        Properties properties = KafkaUtils.getProperties("producer");
        KafkaProducer<String, String> producer = new KafkaProducer<String, String>(properties);
        for (Map.Entry<String, String> entry : topicMsg.entrySet()) {
            String topic = entry.getKey();
            String message = entry.getValue();
            ProducerRecord<String, String> record = new ProducerRecord<String, String>(topic, message);
            // send asynchronously, with a lambda as the completion callback
            // producer.send(record, new CallBackFuntion(topic, message));
            producer.send(record, (recordMetadata, e) -> {
                if (e != null) {
                    System.err.println(topic + ": " + message + " -- send failed");
                } else {
                    System.err.println(topic + ": " + message + " -- send succeeded");
                }
            });
        }
        producer.flush();
        producer.close();
    }
}

The callback can be written inline as an anonymous inner class (or a lambda, as above), or as a separate class instantiated with new:

package com.wenbronk.kafka;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.RecordMetadata;

/**
 * Producer callback
 */
public class CallBackFuntion implements Callback {

    private String topic;
    private String message;

    public CallBackFuntion(String topic, String message) {
        this.topic = topic;
        this.message = message;
    }

    @Override
    public void onCompletion(RecordMetadata recordMetadata, Exception e) {
        if (e != null) {
            System.out.println(topic + ": " + message + " -- send failed");
        } else {
            System.out.println(topic + ": " + message + " -- send succeeded");
        }
    }
}
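
send() also returns a Future<RecordMetadata>, so the result can be waited on synchronously when needed. A minimal sketch (the class name KafkaSyncSendMain is ours; topic and message are illustrative):

package com.wenbronk.kafka;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

public class KafkaSyncSendMain {
    public static void main(String[] args) throws Exception {
        KafkaProducer<String, String> producer =
                new KafkaProducer<>(KafkaUtils.getProperties("producer"));
        // get() blocks until the broker acknowledges, or throws if the send failed
        RecordMetadata meta =
                producer.send(new ProducerRecord<>("http_zhixin", "hello kafka")).get();
        System.out.println("partition=" + meta.partition() + ", offset=" + meta.offset());
        producer.close();
    }
}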

4. Kafka consumer

package com.wenbronk.kafka;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.junit.Test;

import java.io.FileNotFoundException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Properties;

public class KafkaConsumerMain {

    /**
     * Commit offsets automatically
     */
    public void commitAuto(List<String> topics) throws FileNotFoundException {
        Properties props = KafkaUtils.getProperties("consumer");
        props.put("enable.auto.commit", "true");
        props.put("auto.commit.interval.ms", "1000"); // interval was blank in the original; 1000 ms is illustrative

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(topics);
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(100); // timeout was missing in the original; 100 ms is illustrative
            for (ConsumerRecord<String, String> record : records)
                System.err.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
        }
    }

    /**
     * Commit offsets manually, once a batch has been processed
     */
    public void commitControl(List<String> topics) throws FileNotFoundException {
        Properties props = KafkaUtils.getProperties("consumer");
        props.put("enable.auto.commit", "false");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(topics);
        final int minBatchSize = 200; // batch size was blank in the original; 200 is illustrative
        List<ConsumerRecord<String, String>> buffer = new ArrayList<>();
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(100);
            for (ConsumerRecord<String, String> record : records) {
                buffer.add(record);
            }
            if (buffer.size() >= minBatchSize) {
                insertIntoDb(buffer);
                // blocking, synchronous commit
                consumer.commitSync();
                buffer.clear();
            }
        }
    }

    /**
     * Commit the offset per partition, after that partition's messages are processed
     */
    public void setOffSet(List<String> topics) throws FileNotFoundException {
        Properties props = KafkaUtils.getProperties("consumer");
        props.put("enable.auto.commit", "false");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(topics);
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Long.MAX_VALUE);
            for (TopicPartition partition : records.partitions()) {
                List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
                for (ConsumerRecord<String, String> record : partitionRecords) {
                    System.out.println(record.offset() + ": " + record.value());
                }
                long lastOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
                // the committed offset is the position of the next message to read, hence +1
                consumer.commitSync(Collections.singletonMap(partition, new OffsetAndMetadata(lastOffset + 1)));
            }
        }
    }

    /**
     * Manually set the read position with seek
     */
    public void setSeek(List<String> topics) throws FileNotFoundException {
        Properties props = KafkaUtils.getProperties("consumer");
        props.put("enable.auto.commit", "false");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(topics);
        // a first poll is needed so that partitions are actually assigned before seek()
        consumer.poll(0);
        // partition and offset were blank in the original; 0/0 is illustrative
        consumer.seek(new TopicPartition("http_zhixin", 0), 0);
        ConsumerRecords<String, String> records = consumer.poll(100);
        for (ConsumerRecord<String, String> record : records) {
            System.err.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
            consumer.commitSync();
        }
    }

    @Test
    public void test() throws FileNotFoundException {
        ArrayList<String> topics = new ArrayList<>();
        topics.add("http_zhixin");
        // commitAuto(topics);
        // commitControl(topics);
        // setOffSet(topics);
        setSeek(topics);
    }

    /**
     * stand-in for real processing: dump the batch
     */
    private void insertIntoDb(List<ConsumerRecord<String, String>> buffer) {
        buffer.stream().map(x -> x.value()).forEach(System.err::println);
    }
}
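
One version note: from client 2.0 on, poll(long) is deprecated in favor of poll(Duration), which also bounds the time spent waiting for partition assignment. The same poll loop under a 2.0+ client would look like this (sketch only):

import java.time.Duration;

// inside the poll loop, on a Kafka client >= 2.0
ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
for (ConsumerRecord<String, String> record : records) {
    System.out.printf("offset = %d, value = %s%n", record.offset(), record.value());
}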

Kafka consumers in the same consumer group do not read the same message more than once; version 0.11 added transaction support.
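
As a rough sketch of that 0.11+ transaction API (assuming a 0.11+ broker and client; the transactional.id and class name are illustrative, not from the original):

package com.wenbronk.kafka;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.errors.ProducerFencedException;

import java.util.Properties;

public class KafkaTransactionMain {
    public static void main(String[] args) throws Exception {
        Properties props = KafkaUtils.getProperties("producer");
        props.put("transactional.id", "tx-demo-1"); // required for transactions; value is illustrative
        props.put("enable.idempotence", "true");

        KafkaProducer<String, String> producer = new KafkaProducer<>(props);
        producer.initTransactions();
        try {
            producer.beginTransaction();
            producer.send(new ProducerRecord<>("http_zhixin", "message inside a transaction"));
            producer.commitTransaction(); // visible to read_committed consumers only after this
        } catch (ProducerFencedException e) {
            // fatal: another producer with the same transactional.id took over; do not abort
        } catch (KafkaException e) {
            producer.abortTransaction(); // roll back on any other error
        } finally {
            producer.close();
        }
    }
}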
