由于SparkSQL不支持HBase的数据源(HBase-1.1.2),网上有很多方案采用Hortonworks的SHC,而且SparkSQL操作HBase的自定义数据源大多基于Scala实现,因此我自己写了一个Java版的SparkSQL操作HBase的小案例。

1、SparkOnHBase

package com.mengyao.tag.utils.external.hbase;

import org.apache.spark.SparkConf;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession; /**
*
* @author mengyao
*
*/
public class SparkSQLOnHBase {

    /**
     * Entry point: reads an HBase table through the custom
     * {@code HBaseSource} data source and runs a simple filter query.
     *
     * @param args unused
     */
    public static void main(String[] args) {
        SparkConf conf = new SparkConf()
                // Kryo is faster and more compact than Java serialization.
                .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer");
        SparkSession session = SparkSession.builder()
                .config(conf)
                .appName("SparkOnHBase")
                .master("local[*]")
                .getOrCreate();
        try {
            Dataset<Row> df = session.read()
                    // Fully-qualified class name of the RelationProvider implementation.
                    .format("com.mengyao.tag.utils.external.hbase.HBaseSource")
                    .option("zkHosts", "192.168.10.20")
                    .option("zkPort", "2181")
                    .option("hbaseTable", "tbl_tag_user")
                    .option("family", "test")
                    .option("selectFields", "id,username,email,phone")
                    //.option("selectFields", "uid,tids")
                    .load();
            df.printSchema();
            df.explain();
            // Column values come back as strings, so this is a string comparison
            // performed by Spark after the full-table scan.
            df.filter("id>10").show();
        } finally {
            // Always release the SparkSession, even if the query fails.
            // (The original also called df.logicalPlan() here, a no-op whose
            // return value was discarded — removed.)
            session.close();
        }
    }
}

2、HBaseSource

package com.mengyao.tag.utils.external.hbase;

import org.apache.spark.sql.SQLContext;
import org.apache.spark.sql.sources.BaseRelation;
import org.apache.spark.sql.sources.RelationProvider; import scala.collection.immutable.Map; /**
*
* @author mengyao
*
*/
public class HBaseSource implements RelationProvider {

    /**
     * Called by Spark SQL when this class is named via {@code format(...)}.
     * All real work (schema inference and scanning) is delegated to
     * {@link HBaseRelation}.
     */
    @Override
    public BaseRelation createRelation(SQLContext sqlContext, Map<String, String> options) {
        HBaseRelation relation = new HBaseRelation(sqlContext, options);
        return relation;
    }
}

3、HBaseRelation

package com.mengyao.tag.utils.external.hbase;

import java.io.IOException;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.stream.Stream; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.util.Base64;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.spark.rdd.RDD;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.SQLContext;
import org.apache.spark.sql.sources.BaseRelation;
import org.apache.spark.sql.sources.TableScan;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import com.google.common.base.Preconditions; import scala.Tuple2;
import scala.collection.JavaConverters;
import scala.collection.immutable.Map; /**
*
* @author mengyao
*
*/
/**
 * A Spark SQL {@code BaseRelation} backed by a full scan ({@code TableScan})
 * of one HBase table. All columns are exposed as non-null strings: the row
 * key under the name {@code row}, followed by the qualifiers listed in the
 * {@code selectFields} option, read from the single column family given by
 * the {@code family} option.
 */
public class HBaseRelation extends BaseRelation implements Serializable, TableScan {
    private static final long serialVersionUID = 4234614443074355432L;
    // static final: 'transient' is meaningless on a static field.
    private static final Logger logger = LoggerFactory.getLogger(HBaseRelation.class);
    // HBase client configuration keys ...
    private final String HBASE_ZK_PORT_KEY = "hbase.zookeeper.property.clientPort";
    private final String HBASE_ZK_QUORUM_KEY = "hbase.zookeeper.quorum";
    private final String HBASE_ZK_PARENT_KEY = "zookeeper.znode.parent";
    // ... and the data source option names they are populated from.
    private final String HBASE_ZK_PORT_VALUE = "zkPort";
    private final String HBASE_ZK_QUORUM_VALUE = "zkHosts";
    private final String HBASE_ZK_PARENT_VALUE = "/hbase-unsecure";
    private final String HBASE_TABLE = "hbaseTable";
    private final String HBASE_TABLE_FAMILY = "family";
    private final String HBASE_TABLE_SELECT_FIELDS = "selectFields";
    private final String sperator = ",";
    // Name of the synthetic column carrying the HBase row key.
    private final String ROW = "row";
    private SQLContext sqlContext;
    private java.util.Map<String, String> options;
    private StructType schema = null;
    private boolean updateSchema = true;

    public HBaseRelation(SQLContext sqlContext, Map<String, String> options) {
        this.sqlContext = sqlContext;
        // Spark hands us a Scala immutable Map; convert once to a Java view.
        this.options = JavaConverters.mapAsJavaMapConverter(options).asJava();
    }

    /** Performs the full-table scan; Spark calls this when the relation is read. */
    @Override
    public RDD<Row> buildScan() {
        validParams(options);
        return scan(sqlContext, options);
    }

    /**
     * Builds (and caches) the schema: a leading string {@code row} column
     * plus one string column per entry in {@code selectFields}.
     */
    @Override
    public StructType schema() {
        if (updateSchema || schema == null) {
            List<StructField> fields = new ArrayList<>();
            fields.add(DataTypes.createStructField(ROW, DataTypes.StringType, false));
            String fieldsStr = options.get(HBASE_TABLE_SELECT_FIELDS);
            String[] fieldStrs = fieldsStr.split(sperator);
            Stream.of(fieldStrs).forEach(field -> fields.add(DataTypes.createStructField(field, DataTypes.StringType, false)));
            schema = DataTypes.createStructType(fields);
            updateSchema = false;
        }
        logger.info("==== HBaseSource Schema is:{} ====", schema);
        return schema;
    }

    @Override
    public SQLContext sqlContext() {
        return sqlContext;
    }

    /**
     * Fails fast with an NPE-style message when a required option is missing.
     * (Note: the ZooKeeper options are checked via their option names,
     * e.g. "zkHosts"/"zkPort", which the *_VALUE constants hold.)
     */
    private void validParams(java.util.Map<String, String> options){
        String zkHosts = options.get(HBASE_ZK_QUORUM_VALUE);
        Preconditions.checkNotNull(zkHosts, "zkHosts not null!");
        String zkPort = options.get(HBASE_ZK_PORT_VALUE);
        Preconditions.checkNotNull(zkPort, "zkPort not null!");
        String family = options.get(HBASE_TABLE_FAMILY);
        Preconditions.checkNotNull(family, "family not null!");
        String fieldsStr = options.get(HBASE_TABLE_SELECT_FIELDS);
        Preconditions.checkNotNull(fieldsStr, "fieldsStr not null!");
    }

    /**
     * Runs a full scan over the configured table via {@code TableInputFormat}
     * and maps each HBase {@code Result} to a Spark {@code Row} matching
     * {@link #schema()}. Missing cells become null string values.
     *
     * @throws RuntimeException if the Scan cannot be serialized into the
     *         Hadoop configuration
     */
    private RDD<Row> scan(SQLContext sqlContext, java.util.Map<String, String> options) {
        try {
            Configuration conf = HBaseConfiguration.create();
            conf.set(HBASE_ZK_PORT_KEY, options.get(HBASE_ZK_PORT_VALUE));
            conf.set(HBASE_ZK_QUORUM_KEY, options.get(HBASE_ZK_QUORUM_VALUE));
            conf.set(HBASE_ZK_PARENT_KEY, HBASE_ZK_PARENT_VALUE);
            String family = options.get(HBASE_TABLE_FAMILY);
            String fieldsStr = options.get(HBASE_TABLE_SELECT_FIELDS);
            String[] selectFileds = fieldsStr.split(sperator);
            Scan scan = new Scan();
            conf.set(TableInputFormat.INPUT_TABLE, options.get(HBASE_TABLE));
            // TableInputFormat expects the Scan as a Base64-encoded protobuf
            // string in the configuration.
            ClientProtos.Scan proto = ProtobufUtil.toScan(scan);
            String scanStr = Base64.encodeBytes(proto.toByteArray());
            conf.set(TableInputFormat.SCAN, scanStr);
            logger.info("==== HBaseSource Scan is:{} ====", scanStr);
            RDD<Tuple2<ImmutableBytesWritable, Result>> hbaseRdd = sqlContext.sparkContext().newAPIHadoopRDD(conf,
                    TableInputFormat.class, ImmutableBytesWritable.class, Result.class);
            return hbaseRdd.toJavaRDD().map(t -> t._2).map(r -> {
                LinkedList<String> vals = new LinkedList<>();
                String row = Bytes.toString(r.getRow());
                vals.add(row);
                Stream.of(selectFileds).forEach(field -> {
                    // Bytes.toString(null) yields null when the cell is absent.
                    String val = Bytes.toString(r.getValue(Bytes.toBytes(family), Bytes.toBytes(field)));
                    vals.add(val);
                });
                return (Row) RowFactory.create(vals.toArray());
            }).rdd();
        } catch (IOException e) {
            // Fail fast instead of logging + returning null: a null RDD would
            // surface later as an obscure NPE inside Spark's planner.
            throw new RuntimeException(
                    "Failed to build HBase scan for table " + options.get(HBASE_TABLE), e);
        }
    }
}

Spark-2.3.2 Java SparkSQL的自定义HBase数据源的更多相关文章

  1. SparkSQL 如何自定义函数

    1. SparkSql如何自定义函数 2. 示例:Average 3. 类型安全的自定义函数 1. SparkSql如何自定义函数? spark中我们定义一个函数,需要继承 UserDefinedAg ...

  2. spark提示Caused by: java.lang.ClassCastException: scala.collection.mutable.WrappedArray$ofRef cannot be cast to [Lscala.collection.immutable.Map;

    spark提示Caused by: java.lang.ClassCastException: scala.collection.mutable.WrappedArray$ofRef cannot b ...

  3. [转]Java中实现自定义的注解处理器

    Java中实现自定义的注解处理器(Annotation Processor) 置顶2016年07月25日 19:42:49 阅读数:9877 在之前的<简单实现ButterKnife的注解功能& ...

  4. 移动开发首页业界资讯移动应用平台技术专题 输入您要搜索的内容 基于Java Socket的自定义协议,实现Android与服务器的长连接(二)

    在阅读本文前需要对socket以及自定义协议有一个基本的了解,可以先查看上一篇文章<基于Java Socket的自定义协议,实现Android与服务器的长连接(一)>学习相关的基础知识点. ...

  5. spark 执行报错 java.io.EOFException: Premature EOF from inputStream

    使用spark2.4跟spark2.3 做替代公司现有的hive选项. 跑个别任务spark有以下错误 java.io.EOFException: Premature EOF from inputSt ...

  6. Java之SpringBoot自定义配置与整合Druid

    Java之SpringBoot自定义配置与整合Druid SpringBoot配置文件 优先级 前面SpringBoot基础有提到,关于SpringBoot配置文件可以是properties或者是ya ...

  7. 《手把手教你》系列技巧篇(七十一)-java+ selenium自动化测试-自定义类解决元素同步问题(详解教程)

    1.简介 前面宏哥介绍了几种关于时间等待的方法,也提到了,在实际自动化测试脚本开发过程,百分之90的报错是和元素因为时间不同步而发生报错.本文介绍如何新建一个自定义的类库来解决这个元素同步问题.这样, ...

  8. 0基础就可以上手的Spark脚本开发-for Java

    前言 最近由于工作需要,要分析大几百G的Nginx日志数据.之前也有过类似的需求,但那个时候数据量不多.一次只有几百兆,或者几个G.因为数据都在Hive里面,当时的做法是:把数据从Hive导到MySQ ...

  9. 第十一篇:Spark SQL 源码分析之 External DataSource外部数据源

    上周Spark1.2刚发布,周末在家没事,把这个特性给了解一下,顺便分析下源码,看一看这个特性是如何设计及实现的. /** Spark SQL源码分析系列文章*/ (Ps: External Data ...

随机推荐

  1. [LeetCode] 489. Robot Room Cleaner 扫地机器人

    Given a robot cleaner in a room modeled as a grid. Each cell in the grid can be empty or blocked. Th ...

  2. [LeetCode] 557. Reverse Words in a String III 翻转字符串中的单词 III

    Given a string, you need to reverse the order of characters in each word within a sentence while sti ...

  3. [LeetCode] 765. Couples Holding Hands 情侣牵手

    N couples sit in 2N seats arranged in a row and want to hold hands. We want to know the minimum numb ...

  4. rabbitmq设置消息优先级、队列优先级配置

    1.首先在consume之前声明队列的时候,要加上x-max-priority属性,一般为0-255,大于255出错  -----配置队列优先级 配置成功后rabbitmq显示: 2.在向exchan ...

  5. springmvc中跨域问题

    对于web框架中的跨域问题是一个非常普遍的问题,常见的解决方案也有很多,如:jsonp.cros.websocket等.下面是最近处理springmvc中使用cors解决跨域问题的一些总结. Filt ...

  6. 08 Tomcat+Java Web项目的创建和War的生成

    1.web服务器软件:服务器:安装了服务器软件的计算机服务器软件:接收用户的请求,处理请求,做出响应 * web服务器软件:接收用户的请求,处理请求,做出响应. 在web服务器软件中,可以部署web项 ...

  7. Netty--索引

    Netty 入门示例 Netty原理架构解析 Netty 基本原理 Netty面试题 阿里的Netty知识点你又了解多少

  8. Js学习02--变量、关键字、标识符

    一.Js变量的定义 1.定义变量的目的 在内存中分配一块存储空间给变量,方便以后存储数据. 2.如何定义变量 任何变量在使用前都必须定义变量 var 变量名称 eg: var name,age,sex ...

  9. quartz2.3.0(八)使用日历排除不应该执行任务的时间段

    Job任务类 package org.quartz.examples.example8; import java.util.Date; import org.slf4j.Logger; import ...

  10. 配置Setting.xml文件提高maven更新下载jar包速度

    <?xml version="1.0" encoding="UTF-8"?> <settings xmlns="http://mav ...