Spring Boot + Kafka integration (multi-threaded consumers built on the native Kafka consumer API, because changing the groupId through @KafkaListener has no effect)
application-test.properties
#kafka
kafka.consumer.zookeeper.connect=*:2181
kafka.consumer.servers=*:9092
kafka.consumer.enable.auto.commit=true
kafka.consumer.session.timeout=6000
kafka.consumer.auto.commit.interval=1000
#within one group each message is handled by a single consumer; earliest would re-read the partition from the beginning, none fails when no committed offset exists
kafka.consumer.auto.offset.reset=latest
kafka.consumer.concurrency=10
kafka.producer.servers=*:9092
kafka.producer.retries=0
kafka.producer.batch.size=4096
#how long the producer waits before sending a batch to the broker; 0 sends immediately without waiting
kafka.producer.linger=1
kafka.producer.buffer.memory=40960
Startup class
@SpringBootApplication
@EnableScheduling
public class Application {

    @Autowired
    private KafkaSender kafkaSender;

    public static void main(String[] args) {
        SpringApplication.run(Application.class, args);
    }

    // fire a test batch once a minute
    @Scheduled(fixedRate = 1000 * 60)
    public void testKafka() throws Exception {
        kafkaSender.sendTest();
    }
}
Producer:
@Component
public class KafkaSender {

    @Resource
    KafkaConsumerPool consumerPool;

    @Resource
    KafkaConsumerConfig consumerConfig;

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    /**
     * This should really run after the application has fully started (TODO).
     */
    @PostConstruct
    void startConsumers() {
        ConsumerGroup consumerThread = new ConsumerGroup("group-1", "access_data", consumerConfig);
        ConsumerGroup consumerThread2 = new ConsumerGroup("group-2", "access_data", consumerConfig);
        /**
         * Start two consumers for each group. KafkaConsumer is not thread-safe,
         * so every thread gets its own new Consumer instance.
         */
        consumerPool.SubmitConsumerPool(new Consumer(consumerThread));
        consumerPool.SubmitConsumerPool(new Consumer(consumerThread));
        consumerPool.SubmitConsumerPool(new Consumer(consumerThread2));
        consumerPool.SubmitConsumerPool(new Consumer(consumerThread2));
    }

    /**
     * Send test messages to Kafka (topic 'access_data').
     */
    public void sendTest() throws InterruptedException, IOException, KeeperException {
        kafkaTemplate.send("access_data", "" + System.currentTimeMillis());
        kafkaTemplate.send("access_data", "" + System.currentTimeMillis());
        kafkaTemplate.send("access_data", "" + System.currentTimeMillis());
        kafkaTemplate.send("access_data", "" + System.currentTimeMillis());
        kafkaTemplate.send("access_data", "" + System.currentTimeMillis());
        kafkaTemplate.send("access_data", "" + System.currentTimeMillis());
    }
}
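The TODO above can be honoured without touching the native-API code: instead of @PostConstruct, start the consumer threads once the application context is fully up by listening for ApplicationReadyEvent. A minimal sketch of mine follows; the class and method names (ConsumerBootstrap, startConsumers) are not from the original project.

import javax.annotation.Resource;
import org.springframework.boot.context.event.ApplicationReadyEvent;
import org.springframework.context.event.EventListener;
import org.springframework.stereotype.Component;

@Component
public class ConsumerBootstrap {

    @Resource
    private KafkaConsumerPool consumerPool;

    @Resource
    private KafkaConsumerConfig consumerConfig;

    // Runs only after the Spring Boot application has fully started,
    // so all beans (including the pool) are ready before the consumers spin up.
    @EventListener(ApplicationReadyEvent.class)
    public void startConsumers() {
        ConsumerGroup group1 = new ConsumerGroup("group-1", "access_data", consumerConfig);
        ConsumerGroup group2 = new ConsumerGroup("group-2", "access_data", consumerConfig);
        consumerPool.SubmitConsumerPool(new Consumer(group1));
        consumerPool.SubmitConsumerPool(new Consumer(group1));
        consumerPool.SubmitConsumerPool(new Consumer(group2));
        consumerPool.SubmitConsumerPool(new Consumer(group2));
    }
}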
KafkaProducerConfig
@Configuration
@EnableKafka
public class KafkaProducerConfig {

    @Value("${kafka.producer.servers}")
    private String servers;
    @Value("${kafka.producer.retries}")
    private int retries;
    @Value("${kafka.producer.batch.size}")
    private int batchSize;
    @Value("${kafka.producer.linger}")
    private int linger;
    @Value("${kafka.producer.buffer.memory}")
    private int bufferMemory;

    public Map<String, Object> producerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
        props.put(ProducerConfig.RETRIES_CONFIG, retries);
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
        props.put(ProducerConfig.LINGER_MS_CONFIG, linger);
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        return props;
    }

    public ProducerFactory<String, String> producerFactory() {
        return new DefaultKafkaProducerFactory<>(producerConfigs());
    }

    @Bean
    public KafkaTemplate<String, String> kafkaTemplate() {
        return new KafkaTemplate<String, String>(producerFactory());
    }
}
KafkaConsumerConfig
@Configuration
@EnableKafka
public class KafkaConsumerConfig {

    @Value("${kafka.consumer.zookeeper.connect}")
    public String zookeeperConnect;
    @Value("${kafka.consumer.servers}")
    public String servers;
    @Value("${kafka.consumer.enable.auto.commit}")
    public boolean enableAutoCommit;
    @Value("${kafka.consumer.session.timeout}")
    public String sessionTimeout;
    @Value("${kafka.consumer.auto.commit.interval}")
    public String autoCommitInterval;
    @Value("${kafka.consumer.auto.offset.reset}")
    public String autoOffsetReset;
    @Value("${kafka.consumer.concurrency}")
    public int concurrency;

    public String getZookeeperConnect() { return zookeeperConnect; }
    public void setZookeeperConnect(String zookeeperConnect) { this.zookeeperConnect = zookeeperConnect; }

    public String getServers() { return servers; }
    public void setServers(String servers) { this.servers = servers; }

    public boolean isEnableAutoCommit() { return enableAutoCommit; }
    public void setEnableAutoCommit(boolean enableAutoCommit) { this.enableAutoCommit = enableAutoCommit; }

    public String getSessionTimeout() { return sessionTimeout; }
    public void setSessionTimeout(String sessionTimeout) { this.sessionTimeout = sessionTimeout; }

    public String getAutoCommitInterval() { return autoCommitInterval; }
    public void setAutoCommitInterval(String autoCommitInterval) { this.autoCommitInterval = autoCommitInterval; }

    public String getAutoOffsetReset() { return autoOffsetReset; }
    public void setAutoOffsetReset(String autoOffsetReset) { this.autoOffsetReset = autoOffsetReset; }

    public int getConcurrency() { return concurrency; }
    public void setConcurrency(int concurrency) { this.concurrency = concurrency; }
}
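As an aside (a suggestion of mine, not something the original post does): the @Value fields plus hand-written getters and setters above can be replaced by a single @ConfigurationProperties binding. This assumes the property keys are renamed to relaxed-binding-friendly forms such as kafka.consumer.enable-auto-commit and kafka.consumer.session-timeout; the class name KafkaConsumerProperties is mine.

import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.stereotype.Component;

// Binds kafka.consumer.servers, kafka.consumer.enable-auto-commit,
// kafka.consumer.session-timeout, kafka.consumer.auto-commit-interval,
// kafka.consumer.auto-offset-reset and kafka.consumer.concurrency in one place.
@Component
@ConfigurationProperties(prefix = "kafka.consumer")
public class KafkaConsumerProperties {

    private String servers;
    private boolean enableAutoCommit;
    private String sessionTimeout;
    private String autoCommitInterval;
    private String autoOffsetReset;
    private int concurrency;

    public String getServers() { return servers; }
    public void setServers(String servers) { this.servers = servers; }
    public boolean isEnableAutoCommit() { return enableAutoCommit; }
    public void setEnableAutoCommit(boolean enableAutoCommit) { this.enableAutoCommit = enableAutoCommit; }
    public String getSessionTimeout() { return sessionTimeout; }
    public void setSessionTimeout(String sessionTimeout) { this.sessionTimeout = sessionTimeout; }
    public String getAutoCommitInterval() { return autoCommitInterval; }
    public void setAutoCommitInterval(String autoCommitInterval) { this.autoCommitInterval = autoCommitInterval; }
    public String getAutoOffsetReset() { return autoOffsetReset; }
    public void setAutoOffsetReset(String autoOffsetReset) { this.autoOffsetReset = autoOffsetReset; }
    public int getConcurrency() { return concurrency; }
    public void setConcurrency(int concurrency) { this.concurrency = concurrency; }
}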
Consumer
/**
 * The actual consumer. It extends ShutdownableThread; to add more consumers,
 * simply create more instances of this class.
 *
 * @create 2017-11-06 12:42
 * @update 2017-11-06 12:42
 **/
public class Consumer extends ShutdownableThread {

    /**
     * The underlying Kafka consumer.
     */
    private KafkaConsumer<Integer, String> consumer;

    /**
     * Topic to read from.
     */
    private String topic;

    /**
     * Consumer group id.
     */
    private String groupId;

    public Consumer(ConsumerGroup consumerGroup) {
        super("", false);
        this.consumer = consumerGroup.getConsumer();
        this.topic = consumerGroup.getTopic();
        this.groupId = consumerGroup.getA_groupId();
    }

    /**
     * Listens on the topic and reads messages as they arrive; what to do with the data
     * from Kafka is up to the caller. To get broadcast behaviour, the listeners must
     * belong to different groups, i.e. with @KafkaListener the group on each annotation
     * has to differ. If several listeners share the same group, only one of them handles
     * a given message and the others never see it; that is Kafka's distributed (queue)
     * semantics. Listeners in the same group split the incoming messages according to the
     * partition assignment, while listeners in different groups that subscribe to the same
     * topic each receive every message, which is the broadcast mode.
     */
    @Override
    public void doWork() {
        consumer.subscribe(Collections.singletonList(this.topic));
        ConsumerRecords<Integer, String> records = consumer.poll(1000);
        for (ConsumerRecord<Integer, String> record : records) {
            System.out.println("Thread: " + Thread.currentThread().getName()
                    + " Received message: (" + this.groupId + ", " + record.value() + ") at offset "
                    + record.offset() + " partition: " + record.partition());
        }
    }
}
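ShutdownableThread here is the helper from the kafka_2.11 broker jar (kafka.utils.ShutdownableThread), which simply calls doWork() in a loop until shutdown is requested. If you would rather not pull in the broker jar just for that, a rough plain-Java equivalent might look like the sketch below; this is my own illustration, not code from the original post, and the class name PollingConsumerRunnable is invented.

import java.util.Collections;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.WakeupException;

public class PollingConsumerRunnable implements Runnable {

    private final KafkaConsumer<Integer, String> consumer;
    private final String topic;
    private final AtomicBoolean running = new AtomicBoolean(true);

    public PollingConsumerRunnable(ConsumerGroup consumerGroup) {
        this.consumer = consumerGroup.getConsumer();
        this.topic = consumerGroup.getTopic();
    }

    @Override
    public void run() {
        try {
            // subscribe once, then poll in a loop until stop() is called
            consumer.subscribe(Collections.singletonList(topic));
            while (running.get()) {
                ConsumerRecords<Integer, String> records = consumer.poll(1000);
                for (ConsumerRecord<Integer, String> record : records) {
                    System.out.println(record.offset() + " -> " + record.value());
                }
            }
        } catch (WakeupException e) {
            // expected during shutdown: wakeup() interrupts a blocked poll()
        } finally {
            consumer.close();
        }
    }

    public void stop() {
        running.set(false);
        consumer.wakeup();   // wakeup() is the only KafkaConsumer call that is safe from another thread
    }
}

A Runnable like this can be handed to any ExecutorService; stop() should be called during application shutdown.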
ConsumerGroup (builds the consumer group configuration)
public class ConsumerGroup {

    /**
     * Logger.
     */
    private static final Log log = LogFactory.getLog(ConsumerGroup.class);

    /**
     * Topic to consume.
     */
    private final String topic;

    /**
     * Shared connection properties.
     */
    private Properties props;

    /**
     * Consumer group id.
     */
    private final String groupId;

    public ConsumerGroup(String groupId, String topic, KafkaConsumerConfig consumerConfig) {
        createConsumerConfig(groupId, consumerConfig);
        this.topic = topic;
        this.groupId = groupId;
    }

    private Properties createConsumerConfig(String groupId, KafkaConsumerConfig consumerConfig) {
        props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, consumerConfig.servers);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, consumerConfig.enableAutoCommit);
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, consumerConfig.autoCommitInterval);
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, consumerConfig.sessionTimeout);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.IntegerDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, consumerConfig.autoOffsetReset);
        // further settings can be added here
        return props;
    }

    /**
     * Every call returns a brand-new KafkaConsumer, because KafkaConsumer is not thread-safe.
     */
    public KafkaConsumer<Integer, String> getConsumer() {
        return new KafkaConsumer<>(props);
    }

    /**
     * Expose the topic to other classes.
     */
    public String getTopic() {
        return topic;
    }

    public String getA_groupId() {
        return groupId;
    }
}
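The configuration above relies on enable.auto.commit=true, so offsets are committed on the auto-commit interval whether or not processing succeeded. If you need at-least-once processing, a common variation is to turn auto commit off and commit only after the batch has been handled. The sketch below is my own illustration, not part of the original code, and it assumes kafka.consumer.enable.auto.commit=false in application-test.properties.

import java.util.Collections;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class ManualCommitExample {

    // Assumes enable.auto.commit=false; offsets are committed only after processing.
    public static void consume(ConsumerGroup consumerGroup) {
        KafkaConsumer<Integer, String> consumer = consumerGroup.getConsumer();
        consumer.subscribe(Collections.singletonList(consumerGroup.getTopic()));
        while (true) {
            ConsumerRecords<Integer, String> records = consumer.poll(1000);
            for (ConsumerRecord<Integer, String> record : records) {
                System.out.println("handled: " + record.value()); // real processing goes here
            }
            consumer.commitSync(); // commit only once the whole batch was handled
        }
    }
}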
KafkaConsumerPool
@Component
public class KafkaConsumerPool {

    /**
     * Logger.
     */
    private static final Log log = LogFactory.getLog(KafkaConsumerPool.class);

    /**
     * Thread pool.
     */
    private ExecutorService executor;

    /**
     * Initialise a pool of 10 threads.
     */
    @PostConstruct
    void init() {
        executor = Executors.newFixedThreadPool(10);
    }

    /**
     * Submit a new consumer to the pool.
     *
     * @param shutdownableThread
     */
    public void SubmitConsumerPool(ShutdownableThread shutdownableThread) {
        executor.execute(shutdownableThread);
    }

    /**
     * Shut the pool down when the application stops.
     */
    @PreDestroy
    void fin() {
        shutdown();
    }

    public void shutdown() {
        if (executor == null) {
            return;
        }
        executor.shutdown();
        try {
            if (!executor.awaitTermination(5000, TimeUnit.MILLISECONDS)) {
                log.info("Timed out waiting for consumer threads to shut down, exiting uncleanly");
            }
        } catch (InterruptedException e) {
            log.info("Interrupted during shutdown, exiting uncleanly");
        }
    }
}
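One caveat with the pool above: executor.shutdown() only stops new tasks from being accepted, while the ShutdownableThread loops keep calling doWork(), so awaitTermination will usually just time out. A stricter shutdown would remember the submitted threads and ask each one to stop first. The sketch below replaces the two methods inside KafkaConsumerPool and reuses its executor and log fields; it assumes the initiateShutdown() method of kafka.utils.ShutdownableThread, whose exact API can differ between Kafka versions.

// extra field; also needs java.util.List and java.util.concurrent.CopyOnWriteArrayList imports
private final List<ShutdownableThread> consumers = new CopyOnWriteArrayList<>();

public void SubmitConsumerPool(ShutdownableThread shutdownableThread) {
    consumers.add(shutdownableThread);     // remember it so it can be stopped later
    executor.execute(shutdownableThread);
}

public void shutdown() {
    if (executor == null) {
        return;
    }
    for (ShutdownableThread consumer : consumers) {
        consumer.initiateShutdown();       // the loop exits once the current poll() returns
    }
    executor.shutdown();
    try {
        if (!executor.awaitTermination(5000, TimeUnit.MILLISECONDS)) {
            log.info("Timed out waiting for consumer threads to shut down, exiting uncleanly");
        }
    } catch (InterruptedException e) {
        log.info("Interrupted during shutdown, exiting uncleanly");
    }
}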
Dependencies:
<!-- https://mvnrepository.com/artifact/org.apache.kafka/kafka_2.11 -->
<!-- https://mvnrepository.com/artifact/org.apache.kafka/kafka-clients -->
<!-- https://mvnrepository.com/artifact/commons-collections/commons-collections -->
<!-- https://mvnrepository.com/artifact/commons-lang/commons-lang -->
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter</artifactId>
</dependency>
<!-- Kafka support -->
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
</dependency>
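The four mvnrepository links above are only comments, so those artifacts still have to be declared; kafka_2.11 is what provides kafka.utils.ShutdownableThread used by the Consumer class. A rough sketch of the missing entries follows. The version numbers are placeholders I picked for illustration; align them with your broker and let Spring Boot manage the spring-kafka version.

<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.11</artifactId>
    <version>0.11.0.2</version>
</dependency>
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>0.11.0.2</version>
</dependency>
<dependency>
    <groupId>commons-collections</groupId>
    <artifactId>commons-collections</artifactId>
    <version>3.2.2</version>
</dependency>
<dependency>
    <groupId>commons-lang</groupId>
    <artifactId>commons-lang</artifactId>
    <version>2.6</version>
</dependency>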
My open-source project: https://github.com/Enast/hummer. If you are passing by, please leave a star.