Copyright notice: this is an original article by the blogger; do not reproduce it without the blogger's permission. https://blog.csdn.net/xiewenbo/article/details/25637931

package com.mr.test;

import java.io.IOException;

import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReader;
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;

public class CombineSmallfileInputFormat extends CombineFileInputFormat<LongWritable, BytesWritable> {

    @Override
    public RecordReader<LongWritable, BytesWritable> createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException {
        CombineFileSplit combineFileSplit = (CombineFileSplit) split;
        // CombineFileRecordReader hands each file block inside the combined split to one CombineSmallfileRecordReader
        CombineFileRecordReader<LongWritable, BytesWritable> recordReader = new CombineFileRecordReader<LongWritable, BytesWritable>(combineFileSplit, context, CombineSmallfileRecordReader.class);
        try {
            recordReader.initialize(combineFileSplit, context);
        } catch (InterruptedException e) {
            throw new RuntimeException("Error to initialize CombineSmallfileRecordReader.", e);
        }
        return recordReader;
    }
}
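One point worth keeping in mind about CombineFileInputFormat: unless a maximum split size is configured, it may pack every small file into a single combined split, so the whole import runs in one map task. Below is a minimal sketch of a no-arg constructor that could be added to CombineSmallfileInputFormat to cap the split size; the constructor is not part of the code above, and the 64 MB figure is only an example value.

    // Sketch of a possible addition to CombineSmallfileInputFormat (not in the class above):
    public CombineSmallfileInputFormat() {
        super();
        // Without a cap, all small files may end up in one combined split, i.e. a single map task.
        // 64 MB is an arbitrary example; tune it to the cluster.
        setMaxSplitSize(64L * 1024 * 1024);
    }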
package com.mr.test;

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.input.LineRecordReader;

public class CombineSmallfileRecordReader extends RecordReader<LongWritable, BytesWritable> {

    private CombineFileSplit combineFileSplit;
    private LineRecordReader lineRecordReader = new LineRecordReader();
    private Path[] paths;
    private int totalLength;
    private int currentIndex;
    private float currentProgress = 0;
    private LongWritable currentKey;
    private BytesWritable currentValue = new BytesWritable();

    public CombineSmallfileRecordReader(CombineFileSplit combineFileSplit, TaskAttemptContext context, Integer index) throws IOException {
        super();
        this.combineFileSplit = combineFileSplit;
        this.currentIndex = index; // index of the small-file block this reader handles within the CombineFileSplit
    }

    @Override
    public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
        this.combineFileSplit = (CombineFileSplit) split;
        // To read one small-file block of the CombineFileSplit with a LineRecordReader, it first has to be wrapped in a FileSplit
        FileSplit fileSplit = new FileSplit(combineFileSplit.getPath(currentIndex), combineFileSplit.getOffset(currentIndex), combineFileSplit.getLength(currentIndex), combineFileSplit.getLocations());
        lineRecordReader.initialize(fileSplit, context);
        this.paths = combineFileSplit.getPaths();
        totalLength = paths.length;
        // expose the name of the file being read so the mapper can pick it up from the configuration
        context.getConfiguration().set("map.input.file.name", combineFileSplit.getPath(currentIndex).getName());
    }

    @Override
    public LongWritable getCurrentKey() throws IOException, InterruptedException {
        currentKey = lineRecordReader.getCurrentKey();
        return currentKey;
    }

    @Override
    public BytesWritable getCurrentValue() throws IOException, InterruptedException {
        System.out.println("lineRecordReader:" + lineRecordReader.getCurrentValue().toString());
        byte[] content = lineRecordReader.getCurrentValue().toString().getBytes();
        System.out.println("content:" + new String(content));
        // a fresh BytesWritable is created for every value, so no bytes of a previous, longer value linger in its buffer
        currentValue = new BytesWritable();
        currentValue.set(content, 0, content.length);
        System.out.println("currentValue:" + new String(currentValue.getBytes()));
        return currentValue;
    }
    // Quick demo of the BytesWritable pitfall: getBytes() returns the whole backing buffer,
    // so without setCapacity(0) the second, shorter string would still show leftover bytes of the first one.
    public static void main(String args[]) {
        BytesWritable cv = new BytesWritable();
        String str1 = "1234567";
        String str2 = "123450";
        cv.set(str1.getBytes(), 0, str1.getBytes().length);
        System.out.println(new String(cv.getBytes()));
        cv.setCapacity(0);
        cv.set(str2.getBytes(), 0, str2.getBytes().length);
        System.out.println(new String(cv.getBytes()));
    }
    @Override
    public boolean nextKeyValue() throws IOException, InterruptedException {
        if (currentIndex >= 0 && currentIndex < totalLength) {
            return lineRecordReader.nextKeyValue();
        } else {
            return false;
        }
    }

    @Override
    public float getProgress() throws IOException {
        if (currentIndex >= 0 && currentIndex < totalLength) {
            currentProgress = (float) currentIndex / totalLength;
            return currentProgress;
        }
        return currentProgress;
    }

    @Override
    public void close() throws IOException {
        lineRecordReader.close();
    }
}
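The main() demo above is there to illustrate a BytesWritable quirk: getBytes() returns the whole backing buffer, which can be longer than the data actually stored, so stale or zero bytes show up when the buffer is converted to a String. Instead of resetting the capacity, the valid region can be limited with getLength(); the following is a small stand-alone sketch of that approach, not code from the job itself.

package com.mr.test;

import org.apache.hadoop.io.BytesWritable;

// Stand-alone sketch: read only the valid bytes of a BytesWritable via getLength().
public class BytesWritableLengthDemo {
    public static void main(String[] args) {
        BytesWritable bw = new BytesWritable();
        byte[] first = "1234567".getBytes();
        bw.set(first, 0, first.length);
        byte[] second = "123450".getBytes();
        bw.set(second, 0, second.length); // no setCapacity(0) needed here
        // getBytes() may return a padded buffer; only the first getLength() bytes are valid
        System.out.println(new String(bw.getBytes(), 0, bw.getLength())); // prints 123450
    }
}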

package com.mr.test;

import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class BulkImportData {

    public static class TokenizerMapper extends
            Mapper<Object, BytesWritable, Text, Text> {
        public Text _key = new Text();
        public Text _value = new Text();

        public void map(Object key, BytesWritable value, Context context)
                throws IOException, InterruptedException {
            _value.set(value.getBytes());
            String tmp = _value.toString().trim();
            System.out.println(tmp);
            // drop literal "\x00" sequences before parsing the line
            tmp = tmp.replace("\\x00", "");
            _value.set(tmp);
            // file name written into the configuration by CombineSmallfileRecordReader, e.g. mv_0000001.txt
            String filename = context.getConfiguration().get("map.input.file.name");
            String[] splits = _value.toString().split(",");
            if (splits.length == 3) {
                filename = filename.replace("mv_", "");
                filename = filename.replace(".txt", "");
                // row key: first field of the line + "_" + movie id taken from the file name
                _key.set(splits[0] + "_" + filename);
                context.write(_key, _value);
            }
        }
    }
    public static class IntSumReducer extends
            TableReducer<Text, Text, ImmutableBytesWritable> {
        public void reduce(Text key, Iterable<Text> values,
                Context context) throws IOException, InterruptedException {
            Iterator<Text> itr = values.iterator();
            while (itr.hasNext()) {
                Text t = itr.next();
                String[] strs = t.toString().split(",");
                if (strs.length != 3) continue;
                // the second and third fields of each line are stored in the content:score and content:date columns
                Put put = new Put(key.getBytes());
                put.add(Bytes.toBytes("content"), Bytes.toBytes("score"), Bytes.toBytes(strs[1].trim()));
                put.add(Bytes.toBytes("content"), Bytes.toBytes("date"), Bytes.toBytes(strs[2].trim()));
                context.write(new ImmutableBytesWritable(key.getBytes()), put);
            }
        }
    }

    public static void main(String[] args) throws Exception {
        String tablename = "ntf_data";
        Configuration conf = HBaseConfiguration.create();
        // recreate the target table on every run
        HBaseAdmin admin = new HBaseAdmin(conf);
        if (admin.tableExists(tablename)) {
            admin.disableTable(tablename);
            admin.deleteTable(tablename);
        }
        HTableDescriptor htd = new HTableDescriptor(tablename);
        HColumnDescriptor hcd = new HColumnDescriptor("content");
        htd.addFamily(hcd);
        admin.createTable(htd);
        String[] otherArgs = new GenericOptionsParser(conf, args)
                .getRemainingArgs();
        if (otherArgs.length != 1) {
            System.err
                    .println("Usage: BulkImportData <in>, got " + otherArgs.length + " argument(s)");
            System.exit(2);
        }
        Job job = new Job(conf, "h");
        job.setMapperClass(TokenizerMapper.class);
        job.setJarByClass(BulkImportData.class);
        job.setInputFormatClass(CombineSmallfileInputFormat.class);
        job.setNumReduceTasks(5);
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        TableMapReduceUtil.initTableReducerJob(tablename, IntSumReducer.class,
                job);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
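To make the data flow concrete: the code assumes each Netflix input file is named mv_<movieId>.txt and that its data lines have three comma-separated fields (user id, rating, date). The mapper keys every such line by userId_movieId, and the reducer writes the second and third fields into the content column family of the ntf_data table. The following stand-alone illustration mirrors that mapping; the file name and rating line are made-up samples, not data from the post.

package com.mr.test;

// Stand-alone illustration (not part of the job): how one rating line turns into an HBase row.
public class RowKeyDemo {
    public static void main(String[] args) {
        String fileName = "mv_0000001.txt";       // hypothetical file name, as set by the record reader
        String line = "1488844,3,2005-09-06";     // made-up sample line: userId,rating,date
        String movieId = fileName.replace("mv_", "").replace(".txt", "");
        String[] splits = line.split(",");
        String rowKey = splits[0] + "_" + movieId; // same key the TokenizerMapper builds
        System.out.println("row key       = " + rowKey);           // 1488844_0000001
        System.out.println("content:score = " + splits[1].trim()); // 3
        System.out.println("content:date  = " + splits[2].trim()); // 2005-09-06
    }
}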
