package cn.piesat.controller

import java.text.{DecimalFormat, SimpleDateFormat}
import java.util
import java.util.concurrent.{CountDownLatch, Executors, Future}
import ba.common.log.enums.{LogLevel, LogType}
import ba.common.log.utils.LogUtil
import cn.piesat.constants.{HbaseZookeeperConstant, RowkeyConstant}
import cn.piesat.domain._
import cn.piesat.service.impl.{MsgServiceImpl, SparkTaskServiceImpl}
import cn.piesat.thread.HbaseQueryThread
import com.google.gson.Gson
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.{Result, Scan}
import org.apache.hadoop.hbase.filter.{Filter, FilterList}
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.protobuf.ProtobufUtil
import org.apache.hadoop.hbase.util.{Base64, Bytes}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import pie.storage.db.domain._
import pie.storage.db.enums.{CompareOp, DataBaseType}

/**
 * @author liujie
 * Entry point for querying HBase with Spark.
 */
object HbaseReader {
  val sparkTaskService = new SparkTaskServiceImpl
  val msgService = new MsgServiceImpl
  val sparkAppName = "sparkApp"
  val sparkMaster = "local[6]"
  var taskId = 8
  val serviceNum = 76
  val systemId = 12011
  val systemName = "8888"
  val cf = "cf1"
  val cell = "content"
  val zookeeperHost = "bigdata03,bigdata04,bigdata05"
  val zookeeperPort = "2181"
  val excutor = Executors.newCachedThreadPool()

  def main(args: Array[String]): Unit = {
    try {
      if (args.length > 0) {
        taskId = args(0).toInt
      }
      /**
       * Step 1: get the SparkContext.
       */
      val sc = getSparkContext
      /**
       * Step 2: get the list of query parameters.
       */
      val taskParamList = getTaskParam(taskId, sc)
      /**
       * Step 3: query the HBase data.
       */
      val rowkeyRDD = queryHbaseData(taskParamList, sc)
      rowkeyRDD.saveAsTextFile("file://")
      println("rowkeyRDD count: " + rowkeyRDD.count())
      val rowkey = rowkeyRDD.first()
      println("first value: " + util.Arrays.toString(rowkey._2.getValue(cf.getBytes(), cell.getBytes())))
      /**
       * Step 4: parse the data.
       */
      /**
       * Step 5: write the results to text files; the output path comes from the
       * taskParamList obtained in step 2.
       */
    } catch {
      case e: Exception =>
        e.printStackTrace()
    } finally {
      excutor.shutdown()
    }
  }

  /**
   * Get the task id.
   *
   * @param args
   * @return
   */
  private def getTaskId(args: Array[String]): Int = {
    if (args == null || args.length <= 0) {
      -1
    } else {
      try {
        args.apply(0).toInt
      } catch {
        case e: Exception =>
          -1
      }
    }
  }

  /**
   * Get the SparkContext.
   *
   * @return
   */
  private def getSparkContext(): SparkContext = {
    val sparkConf = new SparkConf().setAppName(sparkAppName).setMaster(sparkMaster)
    sparkConf.set("spark.broadcast.factory", "org.apache.spark.broadcast.HttpBroadcastFactory")
    sparkConf.set("spark.network.timeout", "300")
    sparkConf.set("spark.streaming.unpersist", "true")
    sparkConf.set("spark.scheduler.listenerbus.eventqueue.size", "100000")
    sparkConf.set("spark.storage.memoryFraction", "0.5")
    sparkConf.set("spark.shuffle.consolidateFiles", "true")
    sparkConf.set("spark.shuffle.file.buffer", "64")
    sparkConf.set("spark.shuffle.memoryFraction", "0.3")
    sparkConf.set("spark.reducer.maxSizeInFlight", "24")
    sparkConf.set("spark.shuffle.io.maxRetries", "60")
    sparkConf.set("spark.shuffle.io.retryWait", "60")
    sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    new SparkContext(sparkConf)
  }

  /**
   * Get the spark task's query-parameter list.
   *
   * @param taskId
   * @return
   */
  private def getTaskParam(taskId: Int, sc: SparkContext): List[Tuple4[String, String, String, util.List[Filter]]] = {
    var list: List[Tuple4[String, String, String, util.List[Filter]]] = List()
    val sparkTask = sparkTaskService.getSparkTaskByTaskId(taskId)
    val params = sparkTask.getQueryParam
    val gson = new Gson
    val sparkQueryParams = gson.fromJson(params, classOf[SparkQueryParams])
    try {
      //1.**
      val systemId = sparkQueryParams.getSystemId
      //2. start time
      val startTime = sparkQueryParams.getStartTime
      //3. end time
      val endTime = sparkQueryParams.getEndTime
      //4.**
      val stationId = sparkQueryParams.getStationId
      val paramList = sparkQueryParams.getParams
      for (i <- 0 until paramList.size()) {
        val param = paramList.get(i)
        //5.**
        val msgId = param.getMsgId
        //6.**
        val sinkId = param.getSinkId
        //7.**
        val sourceId = param.getSourceId
        //8. table name
        val tableName = msgService.getTieYuanMsgTableNameById(msgId)
        for (num <- 0 until serviceNum) {
          val rowkeyAndFilters = getRowkeyAndFilters(num, systemId, startTime, endTime, stationId, msgId, sinkId, sourceId, tableName)
          list = rowkeyAndFilters :: list
        }
      }
      list
    } catch {
      case e: Exception =>
        LogUtil.writeLog(systemId, LogLevel.ERROR, LogType.NORMAL_LOG, systemName + " Error Info: task parameters exception. " + e)
        null
    }
  }

  /**
   * Query the HBase data.
   */
  private def queryHbaseData(taskParamList: List[(String, String, String, util.List[Filter])], sc: SparkContext): RDD[(ImmutableBytesWritable, Result)] = {
    var rdd: RDD[(ImmutableBytesWritable, Result)] = null
    val latch: CountDownLatch = new CountDownLatch(taskParamList.length)
    val list: util.List[Future[RDD[Tuple2[ImmutableBytesWritable, Result]]]] = new util.ArrayList[Future[RDD[Tuple2[ImmutableBytesWritable, Result]]]]()
    // Submit one query task per parameter tuple; each task builds an RDD for its rowkey range.
    for (taskParam <- taskParamList) {
      list.add(excutor.submit(new HbaseQueryThread(taskParam, sc, latch)))
    }
    import scala.collection.JavaConversions._
    // Union the per-task RDDs into a single result RDD.
    for (li <- list) {
      if (rdd == null) {
        rdd = li.get()
      } else {
        rdd = rdd.++(li.get())
      }
    }
    latch.await()
    rdd
  }

  /**
   * Build the start/stop rowkeys and the filter list for one query.
   *
   * @param num
   * @param systemId
   * @param startTime
   * @param endTime
   * @param stationId
   * @param msgId
   * @param sinkId
   * @param sourceId
   * @return
   */
  private def getRowkeyAndFilters(num: Int, systemId: Int, startTime: String,
                                  endTime: String, stationId: Int, msgId: Int,
                                  sinkId: Int, sourceId: Int,
                                  tableName: String): Tuple4[String, String, String, util.List[Filter]] = {
    // SimpleDateFormat is not thread-safe, so new instances are created on every call
    val simpleDateFormat1 = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss:SSS")
    val simpleDateFormat2 = new SimpleDateFormat("yyyyMMddHHmmssSSS")
    val decimalFormat = new DecimalFormat("00")
    val queryDef = new QueryDef
    //1. set the database type
    queryDef.setDataBaseType(DataBaseType.HBASE)
    //2. set the table name
    queryDef.setTableName(tableName)
    //3. set the query-parameter list
    //3.1 set the ** id parameter
    val systemIdParam = new QueryParam
    systemIdParam.setField(new Field(new FieldInfo(RowkeyConstant.SYSTEM_ID), new FieldValue(systemId)))
    systemIdParam.setCompareOp(CompareOp.EQUAL)
    //3.2 set the **
    val msgIdParam = new QueryParam
    msgIdParam.setField(new Field(new FieldInfo(RowkeyConstant.MSG_ID), new FieldValue(msgId)))
    msgIdParam.setCompareOp(CompareOp.EQUAL)
    //3.3 set the start-time parameter
    val startTimeParam = new QueryParam
    val startTimeFormat = simpleDateFormat2.format(simpleDateFormat1.parse(startTime))
    startTimeParam.setField(new Field(new FieldInfo(RowkeyConstant.TIME), new FieldValue(startTimeFormat)))
    startTimeParam.setCompareOp(CompareOp.GREATER)
    //3.4 set the end-time parameter
    val endTimeParam = new QueryParam
    val endTimeFormat = simpleDateFormat2.format(simpleDateFormat1.parse(endTime))
    endTimeParam.setField(new Field(new FieldInfo(RowkeyConstant.TIME), new FieldValue(endTimeFormat)))
    endTimeParam.setCompareOp(CompareOp.LESS)
    //3.5 set the **
    val sourceParam = new QueryParam
    sourceParam.setField(new Field(new FieldInfo(RowkeyConstant.SINK_ID), new FieldValue(sinkId)))
    sourceParam.setCompareOp(CompareOp.EQUAL)
    //3.6 set the **
    val sinkParam = new QueryParam
    sinkParam.setField(new Field(new FieldInfo(RowkeyConstant.SOURCE_ID), new FieldValue(sourceId)))
    sinkParam.setCompareOp(CompareOp.EQUAL)
    val queryParamList = util.Arrays.asList(systemIdParam, msgIdParam, startTimeParam, endTimeParam, sourceParam, sinkParam)
    queryDef.setListQueryParam(queryParamList)
    val startRowkey = decimalFormat.format(num) + queryDef.getStartRowKey(classOf[String])
    val endRowkey = decimalFormat.format(num) + queryDef.getStopRowKey(classOf[String])
    val filters = queryDef.getFilters(2, num, classOf[String])
    new Tuple4(tableName, startRowkey, endRowkey, filters)
  }

  /**
   * Run the HBase query.
   *
   * @param taskParam
   * @param sc
   */
  def getHbaseQueryRDD(taskParam: (String, String, String, util.List[Filter]), sc: SparkContext): RDD[(ImmutableBytesWritable, Result)] = {
    val hbaseConf = HBaseConfiguration.create()
    hbaseConf.set(HbaseZookeeperConstant.HBASE_ZOOKEEPER_QUORUM, zookeeperHost)
    hbaseConf.set(HbaseZookeeperConstant.HBASE_ZOOKEEPER_PROPERTY_CLIENTPORT, zookeeperPort)
    hbaseConf.set(TableInputFormat.INPUT_TABLE, taskParam._1)
    val scan = new Scan()
    scan.setStartRow(Bytes.toBytes(taskParam._2))
    scan.setStopRow(Bytes.toBytes(taskParam._3))
    val filterList = new FilterList(FilterList.Operator.MUST_PASS_ALL, taskParam._4)
    scan.setFilter(filterList)
    hbaseConf.set(TableInputFormat.SCAN, convertScanToString(scan))
    val rs = sc.newAPIHadoopRDD(
      hbaseConf,
      classOf[TableInputFormat],
      classOf[org.apache.hadoop.hbase.io.ImmutableBytesWritable],
      classOf[org.apache.hadoop.hbase.client.Result])
    //todo parse the results
    rs
    // rs.map(tuple2=>{
    //   val result=tuple2._2
    //   result.
    // })
  }

  private def convertScanToString(scan: Scan) = {
    val proto = ProtobufUtil.toScan(scan)
    Base64.encodeBytes(proto.toByteArray)
  }
}
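
The post references cn.piesat.thread.HbaseQueryThread but never shows it. Below is a minimal sketch of what it might look like, assuming it is a Callable that delegates to HbaseReader.getHbaseQueryRDD and counts down the latch when it finishes; the constructor signature is inferred from how queryHbaseData uses it and is an assumption, not the author's original class.

package cn.piesat.thread

import java.util
import java.util.concurrent.{Callable, CountDownLatch}

import cn.piesat.controller.HbaseReader
import org.apache.hadoop.hbase.client.Result
import org.apache.hadoop.hbase.filter.Filter
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD

// Hypothetical sketch: one Callable per rowkey range, submitted to the cached thread pool.
class HbaseQueryThread(taskParam: (String, String, String, util.List[Filter]),
                       sc: SparkContext,
                       latch: CountDownLatch)
  extends Callable[RDD[(ImmutableBytesWritable, Result)]] {

  override def call(): RDD[(ImmutableBytesWritable, Result)] = {
    try {
      // Build the per-range RDD via the driver-side helper shown above.
      HbaseReader.getHbaseQueryRDD(taskParam, sc)
    } finally {
      // Signal completion whether the query succeeded or failed.
      latch.countDown()
    }
  }
}

Steps 4 and 5 in main are left as TODOs. A minimal sketch of one way to finish them inside main, assuming the payload is stored as UTF-8 text in the cf1:content cell; the output path below is only a placeholder, since the real path is supposed to come from the task parameters:

      // Step 4 (sketch): decode the cf1:content cell of each Result into a string.
      val parsedRDD = rowkeyRDD.map { case (_, result) =>
        Bytes.toString(result.getValue(cf.getBytes(), cell.getBytes()))
      }
      // Step 5 (sketch): write the decoded rows out as text (placeholder path).
      parsedRDD.saveAsTextFile("file:///tmp/hbase-query-result")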
