Pig's built-in PigStorage cannot specify a record (row) delimiter, so I wrote a simple UDF class that lets you set both the field delimiter and the record delimiter. I had put together a simple version of this before,

http://blog.csdn.net/ruishenh/article/details/12048067

but it had serious drawbacks, so this is a rewrite.

To use it, build the jar, upload it to the server, and register it in the grunt shell:

grunt> register  /home/pig/pig-0.11.0/udflib/myStorage.jar

grunt> cat student;
1,xiaohouzi,25/2,xiaohouzi2,24/3,xiaohouzi3,23

grunt> a = load 'student' using com.hcr.hadoop.pig.MyStorage(',','/');

grunt> dump a;

(1,xiaohouzi,25)
(2,xiaohouzi2,24)
(3,xiaohouzi3,23)

grunt> store a into 'myStorageOut' using com.hcr.hadoop.pig.MyStorage(',','/');

After the store completes successfully, check the result:

grunt> cat myStorageOut
1,xiaohouzi,25/2,xiaohouzi2,24/3,xiaohouzi3,23/

The source code:

package com.hcr.hadoop.pig;

import java.io.DataOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.compress.BZip2Codec;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.LineRecordReader;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.pig.Expression;
import org.apache.pig.LoadFunc;
import org.apache.pig.LoadMetadata;
import org.apache.pig.PigException;
import org.apache.pig.ResourceSchema;
import org.apache.pig.ResourceSchema.ResourceFieldSchema;
import org.apache.pig.ResourceStatistics;
import org.apache.pig.StoreFunc;
import org.apache.pig.StoreFuncInterface;
import org.apache.pig.backend.executionengine.ExecException;
import org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigSplit;
import org.apache.pig.data.DataType;
import org.apache.pig.data.Tuple;
import org.apache.pig.data.TupleFactory;
import org.apache.pig.impl.logicalLayer.schema.Schema.FieldSchema;
import org.apache.pig.impl.util.StorageUtil;

public class MyStorage extends LoadFunc implements StoreFuncInterface, LoadMetadata {

    private static final Log LOG = LogFactory.getLog(MyStorage.class);
    private static final String utf8 = "UTF-8";

    // Field and record delimiters for this instance (default: tab-separated lines).
    private String fieldDel = "\t";
    private String recordDel = "\n";

    protected RecordReader recordReader = null;
    protected RecordWriter writer = null;

    public MyStorage() {
    }

    public MyStorage(String fieldDel) {
        this(fieldDel, "\n");
    }

    public MyStorage(String fieldDel, String recordDel) {
        this.fieldDel = fieldDel;
        this.recordDel = recordDel;
    }

    @Override
    public void setLocation(String s, Job job) throws IOException {
        FileInputFormat.setInputPaths(job, s);
    }

    @Override
    public InputFormat getInputFormat() throws IOException {
        // Use a custom InputFormat so the record delimiter can be overridden.
        return new MyStorageInputFormat(recordDel);
    }

    @Override
    public void prepareToRead(RecordReader recordReader, PigSplit pigSplit)
            throws IOException {
        this.recordReader = recordReader;
    }

    @Override
    public Tuple getNext() throws IOException {
        try {
            boolean flag = recordReader.nextKeyValue();
            if (!flag) {
                return null;
            }
            // Split each record on the field delimiter and wrap the pieces in a tuple.
            Text value = (Text) recordReader.getCurrentValue();
            String[] strArray = value.toString().split(fieldDel);
            List<String> lst = new ArrayList<String>();
            for (String singleItem : strArray) {
                lst.add(singleItem);
            }
            return TupleFactory.getInstance().newTuple(lst);
        } catch (InterruptedException e) {
            throw new ExecException("Read data error",
                    PigException.REMOTE_ENVIRONMENT, e);
        }
    }

    @Override
    public String relToAbsPathForStoreLocation(String location, Path curDir)
            throws IOException {
        return LoadFunc.getAbsolutePath(location, curDir);
    }

    @Override
    public OutputFormat getOutputFormat() throws IOException {
        return new MyStorageOutputFormat(StorageUtil.parseFieldDel(fieldDel),
                this.recordDel);
    }

    @Override
    public void setStoreLocation(String location, Job job) throws IOException {
        job.getConfiguration().set("mapred.textoutputformat.separator", "");
        FileOutputFormat.setOutputPath(job, new Path(location));
        if ("true".equals(job.getConfiguration().get(
                "output.compression.enabled"))) {
            FileOutputFormat.setCompressOutput(job, true);
            String codec = job.getConfiguration().get(
                    "output.compression.codec");
            try {
                FileOutputFormat.setOutputCompressorClass(job,
                        (Class<? extends CompressionCodec>) Class
                                .forName(codec));
            } catch (ClassNotFoundException e) {
                throw new RuntimeException("Class not found: " + codec);
            }
        } else {
            // This makes it so that storing to a directory ending with ".gz"
            // or ".bz2" works.
            setCompression(new Path(location), job);
        }
    }

    private void setCompression(Path path, Job job) {
        String location = path.getName();
        if (location.endsWith(".bz2") || location.endsWith(".bz")) {
            FileOutputFormat.setCompressOutput(job, true);
            FileOutputFormat.setOutputCompressorClass(job, BZip2Codec.class);
        } else if (location.endsWith(".gz")) {
            FileOutputFormat.setCompressOutput(job, true);
            FileOutputFormat.setOutputCompressorClass(job, GzipCodec.class);
        } else {
            FileOutputFormat.setCompressOutput(job, false);
        }
    }

    @Override
    public void checkSchema(ResourceSchema s) throws IOException {
        // no-op: any schema is accepted
    }

    @Override
    public void prepareToWrite(RecordWriter writer) throws IOException {
        this.writer = writer;
    }

    @Override
    public void putNext(Tuple t) throws IOException {
        try {
            writer.write(null, t);
        } catch (InterruptedException e) {
            throw new IOException(e);
        }
    }

    @Override
    public void setStoreFuncUDFContextSignature(String signature) {
        // no-op
    }

    @Override
    public void cleanupOnFailure(String location, Job job) throws IOException {
        StoreFunc.cleanupOnFailureImpl(location, job);
    }

    @Override
    public void cleanupOnSuccess(String location, Job job) throws IOException {
        // no-op
    }

    // LoadMetadata: report a fixed schema so the load statement needs no AS clause.
    @Override
    public ResourceSchema getSchema(String location, Job job)
            throws IOException {
        ResourceSchema rs = new ResourceSchema();
        FieldSchema c1 = new FieldSchema("c1", DataType.INTEGER);
        FieldSchema c2 = new FieldSchema("c2", DataType.INTEGER);
        FieldSchema c3 = new FieldSchema("c3", DataType.DOUBLE);
        ResourceFieldSchema fs1 = new ResourceFieldSchema(c1);
        ResourceFieldSchema fs2 = new ResourceFieldSchema(c2);
        ResourceFieldSchema fs3 = new ResourceFieldSchema(c3);
        rs.setFields(new ResourceFieldSchema[] { fs1, fs2, fs3 });
        return rs;
    }

    @Override
    public ResourceStatistics getStatistics(String location, Job job)
            throws IOException {
        return null;
    }

    @Override
    public String[] getPartitionKeys(String location, Job job)
            throws IOException {
        return null;
    }

    @Override
    public void setPartitionFilter(Expression partitionFilter)
            throws IOException {
        // no-op
    }
}

class MyStorageInputFormat extends TextInputFormat {

    private final String recordDel;

    public MyStorageInputFormat(String recordDel) {
        this.recordDel = recordDel;
    }

    @Override
    public RecordReader<LongWritable, Text> createRecordReader(
            InputSplit split, TaskAttemptContext context) {
        String delimiter = context.getConfiguration().get(
                "textinputformat.record.delimiter");
        if (recordDel != null) {
            delimiter = recordDel;
        }
        byte[] recordDelimiterBytes = null;
        if (null != delimiter) {
            try {
                recordDelimiterBytes = decode(delimiter).getBytes("UTF-8");
            } catch (IOException e) {
                // fall back to the default delimiter if decoding fails
                e.printStackTrace();
            }
        }
        return new LineRecordReader(recordDelimiterBytes);
    }

    /**
     * A delimiter handed in from a workflow may be a special character written
     * in escaped octal or hexadecimal form (e.g. "\\001"); decode it into the
     * actual bytes.
     *
     * @throws IOException
     */
    public static String decode(String str) throws IOException {
        String re = str;
        if (str != null && str.startsWith("\\")) {
            str = str.substring(1, str.length());
            String[] chars = str.split("\\\\");
            byte[] bytes = new byte[chars.length];
            for (int i = 0; i < chars.length; i++) {
                if (chars[i].equals("t")) {
                    bytes[i] = 9;
                } else if (chars[i].equals("r")) {
                    bytes[i] = 13;
                } else if (chars[i].equals("n")) {
                    bytes[i] = 10;
                } else if (chars[i].equals("b")) {
                    bytes[i] = 8;
                } else {
                    bytes[i] = Byte.decode(chars[i]);
                }
            }
            try {
                re = new String(bytes, "UTF-8");
            } catch (UnsupportedEncodingException e) {
                throw new IOException(str, e);
            }
        }
        return re;
    }
}

class MyStorageOutputFormat extends
        TextOutputFormat<WritableComparable, Tuple> {

    private final byte fieldDel;
    private final String recordDel;

    public MyStorageOutputFormat(byte delimiter) {
        this(delimiter, "\n");
    }

    public MyStorageOutputFormat(byte delimiter, String recordDel) {
        this.fieldDel = delimiter;
        this.recordDel = recordDel;
    }

    protected static class MyRecordWriter extends
            TextOutputFormat.LineRecordWriter<WritableComparable, Tuple> {

        private final byte[] newline;
        private final byte fieldDel;

        public MyRecordWriter(DataOutputStream out, byte fieldDel)
                throws UnsupportedEncodingException {
            this(out, fieldDel, "\n".getBytes("UTF-8"));
        }

        public MyRecordWriter(DataOutputStream out, byte fieldDel, byte[] record) {
            super(out);
            this.fieldDel = fieldDel;
            this.newline = record;
        }

        public synchronized void write(WritableComparable key, Tuple value)
                throws IOException {
            int sz = value.size();
            for (int i = 0; i < sz; i++) {
                StorageUtil.putField(out, value.get(i));
                if (i != sz - 1) {
                    out.writeByte(fieldDel);
                }
            }
            // Terminate the record with the configured record delimiter
            // instead of the default newline.
            out.write(newline);
        }
    }

    @Override
    public RecordWriter<WritableComparable, Tuple> getRecordWriter(
            TaskAttemptContext job) throws IOException, InterruptedException {
        Configuration conf = job.getConfiguration();
        boolean isCompressed = getCompressOutput(job);
        CompressionCodec codec = null;
        String extension = "";
        if (isCompressed) {
            Class<? extends CompressionCodec> codecClass = getOutputCompressorClass(
                    job, GzipCodec.class);
            codec = (CompressionCodec) ReflectionUtils.newInstance(codecClass,
                    conf);
            extension = codec.getDefaultExtension();
        }
        Path file = getDefaultWorkFile(job, extension);
        FileSystem fs = file.getFileSystem(conf);
        if (!isCompressed) {
            FSDataOutputStream fileOut = fs.create(file, false);
            return new MyRecordWriter(fileOut, fieldDel,
                    this.recordDel.getBytes());
        } else {
            FSDataOutputStream fileOut = fs.create(file, false);
            return new MyRecordWriter(new DataOutputStream(
                    codec.createOutputStream(fileOut)), fieldDel,
                    this.recordDel.getBytes());
        }
    }
}
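Before running it through grunt again, the writer can be sanity-checked in isolation. This is a hypothetical, local-only sketch (the MyRecordWriterDemo class is mine, not part of the original post); it feeds one tuple through MyRecordWriter with ',' as field delimiter and '/' as record delimiter and prints the bytes produced:

package com.hcr.hadoop.pig;

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.util.Arrays;

import org.apache.pig.data.TupleFactory;

// Hypothetical local check: write one tuple with MyRecordWriter and show the
// raw bytes, assuming field delimiter ',' and record delimiter '/'.
// Placed in the same package so the package-private output format is visible.
public class MyRecordWriterDemo {
    public static void main(String[] args) throws Exception {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        MyStorageOutputFormat.MyRecordWriter writer =
                new MyStorageOutputFormat.MyRecordWriter(
                        new DataOutputStream(bytes), (byte) ',', "/".getBytes("UTF-8"));
        writer.write(null, TupleFactory.getInstance()
                .newTuple(Arrays.asList("1", "xiaohouzi", "25")));
        System.out.println(bytes.toString("UTF-8")); // expected: 1,xiaohouzi,25/
    }
}

The same UDF also handles non-printable delimiters; the session below loads data whose fields are separated by \001 and whose records are separated by \002: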
grunt> register  /home/pig/pig-0.11.0/udflib/myStorage.jar
grunt> cat X;
keyDataKNZKCZY:ZDKJS:616150:AFS:3842708d_20131219194420-642464756keyDataKNZKCZY:ZDKJS:616614:AFS:3843920d_20131219194420-642464756keyDataKNZKCZY:ZDKJS:616661:AFS:3844040d_20131219194420-642464756
grunt> a = load 'X' using com.hcr.hadoop.pig.MyStorage('\\001','\\002');
grunt> dump a;
(keyData,KNZKCZY:ZDKJS:616150:AFS:3842708,d_20131219194420-642464756)
(keyData,KNZKCZY:ZDKJS:616614:AFS:3843920,d_20131219194420-642464756)
(keyData,KNZKCZY:ZDKJS:616661:AFS:3844040,d_20131219194420-642464756)
grunt>
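The escaped record delimiter '\\002' above is turned into the real control byte by the decode() helper in MyStorageInputFormat. Here is a small, hypothetical check of that helper (the DecodeDemo class is mine, not part of the original post; it only needs to sit in the same package to reach the package-private class):

package com.hcr.hadoop.pig;

// Hypothetical demo: prints the values MyStorageInputFormat.decode()
// produces for escaped delimiter strings.
public class DecodeDemo {
    public static void main(String[] args) throws Exception {
        System.out.println((int) MyStorageInputFormat.decode("\\t").charAt(0));   // 9  -> tab
        System.out.println((int) MyStorageInputFormat.decode("\\002").charAt(0)); // 2  -> the record delimiter used above
        System.out.println(MyStorageInputFormat.decode("abc"));                   // no leading backslash: returned as-is
    }
}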

Sometimes you don't want to spell out a concrete schema in the load statement (for example, there are too many fields, or the schema shouldn't be exposed) and would rather have the loader supply one. To do that, implement the LoadMetadata interface and override:

@Override
public ResourceSchema getSchema(String location, Job job)
        throws IOException {
    ResourceSchema rs = new ResourceSchema();
    FieldSchema c1 = new FieldSchema("c1", DataType.INTEGER);
    FieldSchema c2 = new FieldSchema("c2", DataType.INTEGER);
    FieldSchema c3 = new FieldSchema("c3", DataType.DOUBLE);
    ResourceFieldSchema fs1 = new ResourceFieldSchema(c1);
    ResourceFieldSchema fs2 = new ResourceFieldSchema(c2);
    ResourceFieldSchema fs3 = new ResourceFieldSchema(c3);
    rs.setFields(new ResourceFieldSchema[] { fs1, fs2, fs3 });
    return rs;
}

This simple example just returns a fixed schema, so the load statement needs no AS clause and downstream operators can refer to c1, c2 and c3 by name:

grunt> register  /home/pig/pig-0.11.0/udflib/myStorage.jar

grunt> a = load 'student' using com.hcr.hadoop.pig.MyStorage(',','/');

grunt> describe a;
a: {c1: int,c2: int,c3: double}

grunt> b = foreach  a generate c1,c2,c3;

grunt> describe b;

b: {c1: int,c2: int,c3: double}
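As a side note, hand-assembling FieldSchema objects gets verbose as the number of columns grows. The same ResourceSchema could likely be built from a schema string instead; this is a hedged sketch, not code from the original post, and it assumes org.apache.pig.impl.util.Utils.getSchemaFromString and the ResourceSchema(Schema) constructor are available in this Pig version (0.11):

@Override
public ResourceSchema getSchema(String location, Job job) throws IOException {
    try {
        // Sketch: build "c1:int, c2:int, c3:double" from a schema string
        // (assumes Pig's Utils.getSchemaFromString helper).
        return new ResourceSchema(
                org.apache.pig.impl.util.Utils.getSchemaFromString("c1:int, c2:int, c3:double"));
    } catch (Exception e) {
        throw new IOException(e);
    }
}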

Source: http://blog.csdn.net/ruishenh/article/details/12192391
