RoundRobinPartitioner / HashPartitioner: two custom partitioners for the Kafka 0.11 Java producer, written in Scala, followed by a producer and a consumer that show which partition each record ends up on.

import java.util
import java.util.concurrent.atomic.AtomicLong

import org.apache.kafka.clients.producer.Partitioner
import org.apache.kafka.common.Cluster

// Round-robin partitioner: ignores the key and spreads records evenly
// across the topic's partitions using an atomic counter.
class SelfRoundRobinPartitioner extends Partitioner {

  val next = new AtomicLong()

  override def partition(topic: String, key: scala.Any, keyBytes: Array[Byte],
                         value: scala.Any, valueBytes: Array[Byte], cluster: Cluster): Int = {
    val partitionInfo = cluster.partitionsForTopic(topic)
    val numPartitions = partitionInfo.size()
    val nextIndex = next.incrementAndGet()
    val partitionNum: Long = nextIndex % numPartitions
    partitionNum.toInt
  }

  override def close(): Unit = {}

  override def configure(configs: util.Map[String, _]): Unit = {}
}
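One caveat with the counter approach: if the AtomicLong ever wraps past Long.MaxValue, nextIndex goes negative and so does the modulo, which makes partition() return an invalid partition number. A minimal sketch of a guard, where toPositive is a hypothetical local helper, not part of the Partitioner API:

object RoundRobinIndex {
  // Mask the sign bit so the counter-derived index stays non-negative
  // even after the counter overflows Long.MaxValue.
  def toPositive(n: Long): Long = n & Long.MaxValue

  def pick(counter: Long, numPartitions: Int): Int =
    (toPositive(counter) % numPartitions).toInt
}

Inside partition() this would read RoundRobinIndex.pick(next.incrementAndGet(), numPartitions).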
import java.util

import scala.math._

import org.apache.kafka.clients.producer.Partitioner
import org.apache.kafka.common.Cluster

// Hash partitioner: records with the same key always land on the same partition.
class SelfHashPartitioner extends Partitioner {

  override def partition(topic: String, key: scala.Any, keyBytes: Array[Byte],
                         value: scala.Any, valueBytes: Array[Byte], cluster: Cluster): Int = {
    val partitionInfo = cluster.partitionsForTopic(topic)
    val numPartitions = partitionInfo.size()
    // abs(...) keeps the result in [0, numPartitions) even for negative
    // integer keys or negative hash codes.
    if (key.isInstanceOf[Int]) {
      abs(key.toString.toInt) % numPartitions
    } else {
      abs(key.hashCode()) % numPartitions
    }
  }

  override def close(): Unit = {}

  override def configure(configs: util.Map[String, _]): Unit = {}
}
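For comparison, Kafka's built-in DefaultPartitioner never calls hashCode() on the deserialized key; it murmur2-hashes the serialized key bytes, which keeps the key-to-partition mapping stable across client languages and JVM versions. A sketch of that style, assuming the Utils class shipped with kafka-clients 0.11 is on the classpath:

import org.apache.kafka.common.utils.Utils

object KeyBytesPartitioning {
  // Hash the serialized key bytes the way Kafka's DefaultPartitioner does;
  // Utils.toPositive masks the sign bit so the index stays in [0, numPartitions).
  def partitionForKey(keyBytes: Array[Byte], numPartitions: Int): Int =
    Utils.toPositive(Utils.murmur2(keyBytes)) % numPartitions
}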
import java.util.Properties

import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}

object KafkaProducer {

  def main(args: Array[String]): Unit = {
    val brokers = "192.168.1.151:9092,192.168.1.152:9092,192.168.1.153:9092"
    // val brokers = "192.168.1.151:9092"
    val topic = "ScalaTopic"

    val props = new Properties()
    props.put("bootstrap.servers", brokers)
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")
    // Swap in the hash partitioner here to compare the two runs below:
    // props.put("partitioner.class", classOf[SelfHashPartitioner].getName)
    props.put("partitioner.class", classOf[SelfRoundRobinPartitioner].getName)
    // "producer.type" is a legacy 0.8 setting; the 0.11 Java producer ignores it.
    props.put("producer.type", "sync")
    // batch.size is in bytes; 1 effectively disables batching, so every
    // record goes through the partitioner and out on its own.
    props.put("batch.size", "1")
    props.put("acks", "all")

    val producer = new KafkaProducer[String, String](props)
    val sleepFlag = false

    val message1 = new ProducerRecord[String, String](topic, "1", "test 1aa")
    producer.send(message1)
    if (sleepFlag) Thread.sleep(5000)

    val message2 = new ProducerRecord[String, String](topic, "1", "test 1bb")
    producer.send(message2)
    if (sleepFlag) Thread.sleep(5000)

    val message3 = new ProducerRecord[String, String](topic, "1", "test 1cc")
    producer.send(message3)
    if (sleepFlag) Thread.sleep(5000)

    val message4 = new ProducerRecord[String, String](topic, "4", "test 4dd")
    producer.send(message4)
    if (sleepFlag) Thread.sleep(5000)

    val message5 = new ProducerRecord[String, String](topic, "4", "test 4aa")
    producer.send(message5)
    if (sleepFlag) Thread.sleep(5000)

    val message6 = new ProducerRecord[String, String](topic, "3", "test 3bb")
    producer.send(message6)
    if (sleepFlag) Thread.sleep(5000)

    val message7 = new ProducerRecord[String, String](topic, "2", "test 2bb")
    producer.send(message7)
    if (sleepFlag) Thread.sleep(5000)

    producer.close()
  }
}
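To see which partition each record lands on from the producer side, instead of only from the consumer output below, send() also accepts a callback whose RecordMetadata carries the assigned partition and offset. A sketch, reusing the producer and topic from the listing above:

import org.apache.kafka.clients.producer.{Callback, ProducerRecord, RecordMetadata}

// Sketch: print the partition/offset the broker assigned to each record.
val record = new ProducerRecord[String, String](topic, "1", "test 1aa")
producer.send(record, new Callback {
  override def onCompletion(metadata: RecordMetadata, exception: Exception): Unit =
    if (exception != null) exception.printStackTrace()
    else println(s"partition = ${metadata.partition()}, offset = ${metadata.offset()}")
})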
import java.util.Properties

import org.apache.kafka.clients.consumer.{ConsumerRecords, KafkaConsumer}

import scala.collection.JavaConversions._

object KafkaTConsumer {

  def main(args: Array[String]): Unit = {
    var groupid = "ScalaGroup"
    var consumerid = "ScalaConsumer"
    var topic = "ScalaTopic"

    // Optionally take the topic, group and consumer ids from the command line:
    // args match {
    //   case Array(arg1, arg2, arg3) => topic = arg1; groupid = arg2; consumerid = arg3
    // }

    val props = new Properties()
    props.put("bootstrap.servers", "192.168.1.151:9092,192.168.1.152:9092,192.168.1.153:9092")
    props.put("group.id", groupid)
    props.put("client.id", "test")
    // "consumer.id" is a legacy 0.8 setting; the 0.11 consumer identifies
    // itself through client.id and group.id instead.
    props.put("consumer.id", consumerid)
    // To read the topic from the beginning ("smallest" in the old consumer):
    // props.put("auto.offset.reset", "earliest")
    props.put("enable.auto.commit", "true")
    props.put("auto.commit.interval.ms", "100")
    props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
    props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")

    val consumer = new KafkaConsumer[String, String](props)
    consumer.subscribe(java.util.Arrays.asList(topic))

    while (true) {
      val records: ConsumerRecords[String, String] = consumer.poll(100)
      for (record <- records) {
        println(s"Topic = ${record.topic()}, partition = ${record.partition()}, key = ${record.key()}, value = ${record.value()}")
      }
    }
  }
}
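With enable.auto.commit=true, offsets are committed on a timer, so a crash between a commit and the processing of the polled records can drop or replay messages. A sketch of the manual alternative, assuming enable.auto.commit is set to "false" in the props above:

// Sketch: commit offsets only after a polled batch has been processed.
while (true) {
  val records = consumer.poll(100)
  for (record <- records) {
    println(s"partition = ${record.partition()}, value = ${record.value()}")
  }
  if (!records.isEmpty) consumer.commitSync()
}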

Round-robin output:

Topic = ScalaTopic, partition = 0, key = 1, value = test 1cc
Topic = ScalaTopic, partition = 0, key = 3, value = test 3bb
Topic = ScalaTopic, partition = 1, key = 1, value = test 1aa
Topic = ScalaTopic, partition = 1, key = 4, value = test 4dd
Topic = ScalaTopic, partition = 2, key = 1, value = test 1bb
Topic = ScalaTopic, partition = 2, key = 4, value = test 4aa
Topic = ScalaTopic, partition = 1, key = 2, value = test 2bb

Hash partitioner output:

Topic = ScalaTopic, partition = 1, key = 1, value = test 1aa
Topic = ScalaTopic, partition = 1, key = 1, value = test 1bb
Topic = ScalaTopic, partition = 0, key = 3, value = test 3bb
Topic = ScalaTopic, partition = 2, key = 2, value = test 2bb
Topic = ScalaTopic, partition = 1, key = 1, value = test 1cc
Topic = ScalaTopic, partition = 1, key = 4, value = test 4dd
Topic = ScalaTopic, partition = 1, key = 4, value = test 4aa

Note the difference: the hash partitioner keeps every record with the same key on the same partition (all key=1 and key=4 records land on partition 1), while round-robin spreads records, even those with identical keys, evenly across partitions 0, 1 and 2.
