Reading and writing HBase with Flink batch jobs

source-hbase

Base class

It is modeled on the official Flink implementation.

import lombok.SneakyThrows;
import org.apache.flink.api.common.io.LocatableInputSplitAssigner;
import org.apache.flink.api.common.io.RichInputFormat;
import org.apache.flink.api.common.io.statistics.BaseStatistics;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.core.io.InputSplitAssigner;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import static com.hecaiyun.common.bean.HBaseConstant.*;

/**
 * HBase batch source base class, modeled on Flink's official HBase input format.
 *
 * @author WeiJiQian
 */
public abstract class SourceHBaseInputBase<T> extends RichInputFormat<T, MyTableInputSplit> {

    protected static final Logger LOG = LoggerFactory.getLogger(SourceHBaseInputBase.class);

    // helper variable to decide whether the input is exhausted or not
    protected boolean endReached = false;

    protected transient HTable table = null;
    protected transient Scan scan = null;
    protected transient Connection connection = null;

    /** HBase iterator wrapper. */
    protected ResultScanner resultScanner = null;

    protected byte[] currentRow;
    protected long scannedRows;
    protected ParameterTool parameterTool;

    protected abstract T mapResultToOutType(Result r);

    protected abstract void getScan();

    protected abstract TableName getTableName();

    protected void getTable() throws IOException {
        org.apache.hadoop.conf.Configuration configuration;
        parameterTool = PropertiesUtil.PARAMETER_TOOL;
        configuration = HBaseConfiguration.create();
        configuration.set(HBASE_ZOOKEEPER_QUORUM, parameterTool.get(HBASE_ZOOKEEPER_QUORUM));
        configuration.set(HBASE_ZOOKEEPER_PROPERTY_CLIENTPORT, parameterTool.get(HBASE_ZOOKEEPER_PROPERTY_CLIENTPORT));
        configuration.set(HBASE_RPC_TIMEOUT, parameterTool.get(HBASE_RPC_TIMEOUT));
        configuration.set(HBASE_CLIENT_OPERATION_TIMEOUT, parameterTool.get(HBASE_CLIENT_OPERATION_TIMEOUT));
        configuration.set(HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, parameterTool.get(HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD));
        connection = ConnectionFactory.createConnection(configuration);
        table = (HTable) connection.getTable(getTableName());
    }

    @SneakyThrows
    @Override
    public void configure(Configuration parameters) {
        getTable();
        getScan();
    }

    @Override
    public void open(MyTableInputSplit split) throws IOException {
        System.out.println("open: table == null ? " + (table == null));
        if (table == null) {
            System.out.println("open: table is null ---------");
            throw new IOException("The HBase table has not been opened! " +
                    "This needs to be done in configure().");
        }
        if (scan == null) {
            throw new IOException("Scan has not been initialized! " +
                    "This needs to be done in configure().");
        }
        if (split == null) {
            throw new IOException("Input split is null!");
        }

        logSplitInfo("opening", split);

        // set scan range
        currentRow = split.getStartRow();
        scan.setStartRow(currentRow);
        scan.setStopRow(split.getEndRow());

        resultScanner = table.getScanner(scan);
        endReached = false;
        scannedRows = 0;
    }

    @Override
    public T nextRecord(T reuse) throws IOException {
        if (resultScanner == null) {
            throw new IOException("No table result scanner provided!");
        }
        Result res;
        try {
            res = resultScanner.next();
        } catch (Exception e) {
            resultScanner.close();
            // workaround for timeout on scan: reopen the scanner after the last successfully read row
            LOG.warn("Error after scan of " + scannedRows + " rows. Retry with a new scanner...", e);
            scan.withStartRow(currentRow, false);
            resultScanner = table.getScanner(scan);
            res = resultScanner.next();
        }
        if (res != null) {
            scannedRows++;
            currentRow = res.getRow();
            return mapResultToOutType(res);
        }
        endReached = true;
        return null;
    }

    private void logSplitInfo(String action, MyTableInputSplit split) {
        int splitId = split.getSplitNumber();
        String splitStart = Bytes.toString(split.getStartRow());
        String splitEnd = Bytes.toString(split.getEndRow());
        String splitStartKey = splitStart.isEmpty() ? "-" : splitStart;
        String splitStopKey = splitEnd.isEmpty() ? "-" : splitEnd;
        String[] hostnames = split.getHostnames();
        LOG.info("{} split (this={})[{}|{}|{}|{}]", action, this, splitId, hostnames, splitStartKey, splitStopKey);
    }

    @Override
    public boolean reachedEnd() throws IOException {
        return endReached;
    }

    @Override
    public void close() throws IOException {
        LOG.info("Closing split (scanned {} rows)", scannedRows);
        currentRow = null;
        try {
            if (resultScanner != null) {
                resultScanner.close();
            }
        } finally {
            resultScanner = null;
        }
    }

    @Override
    public void closeInputFormat() throws IOException {
        try {
            if (connection != null) {
                connection.close();
            }
        } finally {
            connection = null;
        }

        try {
            if (table != null) {
                table.close();
            }
        } finally {
            table = null;
        }
    }

    @Override
    public MyTableInputSplit[] createInputSplits(final int minNumSplits) throws IOException {
        if (table == null) {
            throw new IOException("The HBase table has not been opened! " +
                    "This needs to be done in configure().");
        }
        if (scan == null) {
            throw new IOException("Scan has not been initialized! " +
                    "This needs to be done in configure().");
        }

        // Get the starting and ending row keys for every region in the currently open table
        final Pair<byte[][], byte[][]> keys = table.getRegionLocator().getStartEndKeys();
        if (keys == null || keys.getFirst() == null || keys.getFirst().length == 0) {
            throw new IOException("Expecting at least one region.");
        }

        final byte[] startRow = scan.getStartRow();
        final byte[] stopRow = scan.getStopRow();
        final boolean scanWithNoLowerBound = startRow.length == 0;
        final boolean scanWithNoUpperBound = stopRow.length == 0;

        final List<MyTableInputSplit> splits = new ArrayList<MyTableInputSplit>(minNumSplits);
        for (int i = 0; i < keys.getFirst().length; i++) {
            final byte[] startKey = keys.getFirst()[i];
            final byte[] endKey = keys.getSecond()[i];
            final String regionLocation = table.getRegionLocator().getRegionLocation(startKey, false).getHostnamePort();
            // Test if the given region is to be included in the InputSplit while splitting the regions of a table
            if (!includeRegionInScan(startKey, endKey)) {
                continue;
            }
            // Find the region on which the given row is being served
            final String[] hosts = new String[]{regionLocation};

            // Determine if the region contains keys used by the scan
            boolean isLastRegion = endKey.length == 0;
            if ((scanWithNoLowerBound || isLastRegion || Bytes.compareTo(startRow, endKey) < 0) &&
                    (scanWithNoUpperBound || Bytes.compareTo(stopRow, startKey) > 0)) {
                final byte[] splitStart = scanWithNoLowerBound || Bytes.compareTo(startKey, startRow) >= 0 ? startKey : startRow;
                final byte[] splitStop = (scanWithNoUpperBound || Bytes.compareTo(endKey, stopRow) <= 0)
                        && !isLastRegion ? endKey : stopRow;
                int id = splits.size();
                final MyTableInputSplit split = new MyTableInputSplit(id, hosts, table.getName().getName(), splitStart, splitStop);
                splits.add(split);
            }
        }

        LOG.info("Created " + splits.size() + " splits");
        for (MyTableInputSplit split : splits) {
            logSplitInfo("created", split);
        }
        return splits.toArray(new MyTableInputSplit[splits.size()]);
    }

    /**
     * Test if the given region is to be included in the scan while splitting the regions of a table.
     *
     * @param startKey Start key of the region
     * @param endKey   End key of the region
     * @return true, if this region needs to be included as part of the input (default).
     */
    protected boolean includeRegionInScan(final byte[] startKey, final byte[] endKey) {
        return true;
    }

    @Override
    public InputSplitAssigner getInputSplitAssigner(MyTableInputSplit[] inputSplits) {
        return new LocatableInputSplitAssigner(inputSplits);
    }

    @Override
    public BaseStatistics getStatistics(BaseStatistics cachedStatistics) {
        return null;
    }
}
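The base class also depends on a MyTableInputSplit type that the post does not show. A minimal sketch of what it might look like, modeled on Flink's own TableInputSplit; the field names and accessors below are assumptions chosen to match how SourceHBaseInputBase uses the class:

// Hypothetical sketch of MyTableInputSplit (not part of the original post).
import org.apache.flink.core.io.LocatableInputSplit;

public class MyTableInputSplit extends LocatableInputSplit {

    private final byte[] tableName; // name of the HBase table this split belongs to
    private final byte[] startRow;  // first row (inclusive) of the split
    private final byte[] endRow;    // end row (exclusive) of the split

    public MyTableInputSplit(int splitNumber, String[] hostnames,
                             byte[] tableName, byte[] startRow, byte[] endRow) {
        super(splitNumber, hostnames);
        this.tableName = tableName;
        this.startRow = startRow;
        this.endRow = endRow;
    }

    public byte[] getTableName() {
        return tableName;
    }

    public byte[] getStartRow() {
        return startRow;
    }

    public byte[] getEndRow() {
        return endRow;
    }
}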

Subclass

import org.apache.flink.configuration.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

import java.util.List;

/**
 * Source that scans the persona-data table and maps each row key to a UsersBean.
 *
 * @author WeiJiQian
 */
public class SourceDaysHbase extends SourceHBaseInputBase<UsersBean> {

    private List<String> dates;
    private UsersBean usersBean = new UsersBean();

    public SourceDaysHbase(List<String> dates) {
        // stored by the constructor; not used by the scan shown here
        this.dates = dates;
    }

    @Override
    public void configure(Configuration parameters) {
        super.configure(parameters);
    }

    @Override
    protected UsersBean mapResultToOutType(Result r) {
        // note: a single UsersBean instance is reused for every record
        usersBean.setPhone11(CustomizeUtils.getPhoneOfPersonaDataRowKey(Bytes.toString(r.getRow())));
        usersBean.setPhone8(CustomizeUtils.getPhone8(usersBean.getPhone11()));
        return usersBean;
    }

    @Override
    protected void getScan() {
        scan = new Scan();
        scan.addColumn(HBaseConstant.HBASE_PERSONA_FAMILY_MONTH_DAY, HBaseConstant.HBASE_PERSONA_ACTIVITE_DATE);
    }

    @Override
    protected TableName getTableName() {
        return TableName.valueOf(parameterTool.get(HBaseConstant.HBASE_TABLE_NAME_PERSONA_DATA));
    }
}
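For context, a source like this is plugged into a batch job with ExecutionEnvironment.createInput. A minimal sketch; the job class name, the date value, and the explicit TypeInformation are assumptions, while UsersBean comes from the post's own code base:

// Sketch: running the HBase source in a Flink DataSet job.
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;

import java.util.Collections;

public class ReadPersonaJob {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        // the TypeInformation is passed explicitly because the input format
        // does not expose its produced type itself
        DataSet<UsersBean> users = env.createInput(
                new SourceDaysHbase(Collections.singletonList("2020-08-01")),
                TypeInformation.of(UsersBean.class));

        users.print(); // print() triggers execution of the DataSet job
    }
}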

sink-hbase

import lombok.extern.slf4j.Slf4j;
import org.apache.flink.api.common.io.OutputFormat;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

import java.io.IOException;

import static com.hecaiyun.common.bean.HBaseConstant.*;

/**
 * Base OutputFormat that writes to HBase through a BufferedMutator.
 *
 * @author WeiJiQian
 */
@Slf4j
public abstract class HBaseOutputFormatBase<T> implements OutputFormat<T> {

    protected final String valueString = "1";
    protected String date;
    protected Table table;
    protected Connection connection;
    protected BufferedMutatorParams params;
    protected BufferedMutator mutator;
    protected org.apache.hadoop.conf.Configuration configuration;
    protected ParameterTool parameterTool;

    public abstract TableName getTableName();

    @Override
    public void configure(Configuration parameters) {
        parameterTool = PropertiesUtil.PARAMETER_TOOL;
        configuration = HBaseConfiguration.create();
        configuration.set(HBASE_ZOOKEEPER_QUORUM, parameterTool.get(HBASE_ZOOKEEPER_QUORUM));
        configuration.set(HBASE_ZOOKEEPER_PROPERTY_CLIENTPORT, parameterTool.get(HBASE_ZOOKEEPER_PROPERTY_CLIENTPORT));
        configuration.set(HBASE_RPC_TIMEOUT, parameterTool.get(HBASE_RPC_TIMEOUT));
        configuration.set(HBASE_CLIENT_OPERATION_TIMEOUT, parameterTool.get(HBASE_CLIENT_OPERATION_TIMEOUT));
        configuration.set(HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, parameterTool.get(HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD));
    }

    @Override
    public void open(int taskNumber, int numTasks) throws IOException {
        connection = ConnectionFactory.createConnection(configuration);
        table = connection.getTable(getTableName());
        params = new BufferedMutatorParams(table.getName());
        // set the write buffer size (100 MB, taken from the configuration)
        params.writeBufferSize(parameterTool.getLong(HBASE_WRITEBUFFER_SIZE));
        mutator = connection.getBufferedMutator(params);
    }

    /**
     * Write (overwrite) one cell through the buffered mutator.
     *
     * @param rowKey row key
     * @param family column family
     * @param colum  column qualifier
     * @param value  cell value
     */
    public void putData(String rowKey, byte[] family, byte[] colum, String value) throws IOException {
        Put put = new Put(Bytes.toBytes(rowKey));
        put.addColumn(family, colum, Bytes.toBytes(value));
        // skip the WAL for faster writes, at the cost of durability on region server failure
        put.setDurability(Durability.SKIP_WAL);
        mutator.mutate(put);
    }

    @Override
    public void close() throws IOException {
        if (mutator != null) {
            mutator.flush();
            mutator.close();
        }
        if (table != null) {
            table.close();
        }
        if (connection != null) {
            connection.close();
        }
    }
}
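The post stops at the abstract sink; a concrete subclass still has to provide getTableName() and OutputFormat's writeRecord(). A minimal sketch that reuses putData() and the constants already used by the source above; the class name and the choice of row key and column are assumptions, not the author's actual sink:

// Hypothetical sink subclass: writes one cell per UsersBean through the buffered mutator.
import org.apache.hadoop.hbase.TableName;

import java.io.IOException;

public class SinkUsersHbase extends HBaseOutputFormatBase<UsersBean> {

    @Override
    public TableName getTableName() {
        return TableName.valueOf(parameterTool.get(HBaseConstant.HBASE_TABLE_NAME_PERSONA_DATA));
    }

    @Override
    public void writeRecord(UsersBean record) throws IOException {
        // row key: the 11-digit phone; value: the constant "1" defined in the base class
        putData(record.getPhone11(),
                HBaseConstant.HBASE_PERSONA_FAMILY_MONTH_DAY,
                HBaseConstant.HBASE_PERSONA_ACTIVITE_DATE,
                valueString);
    }
}

In the job sketched earlier, this would replace users.print(): call users.output(new SinkUsersHbase()) and then env.execute("read-write hbase").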
