【Hadoop】Hadoop MR Custom Serialization Class
1. Basic Concepts

Hadoop MapReduce does not use Java's built-in Serializable mechanism; it ships its own compact, high-throughput serialization framework. Any custom type passed between map and reduce must implement org.apache.hadoop.io.Writable: a write(DataOutput) method that serializes the fields, and a readFields(DataInput) method that deserializes them in exactly the same order. The type also needs a public no-argument constructor, because the framework instantiates it reflectively. (If the custom type is used as a key, it must additionally implement WritableComparable so it can be sorted during the shuffle.) The example below wraps a phone number plus upstream/downstream traffic in a FlowBean and sums the traffic per phone number.
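For reference, a minimal sketch of the contract a custom value type must satisfy (the class and field names here are illustrative, not part of the example below):

// Minimal Writable sketch: write() and readFields() must be mirror images.
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.Writable;

public class MyBean implements Writable {
    private long field;

    public MyBean() {} // required: the framework creates instances reflectively

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeLong(field); // serialize fields in a fixed order
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        field = in.readLong(); // read back in exactly the same order
    }
}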
2. Mapper Code
package com.ares.hadoop.mr.flowsum;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.util.StringUtils;
import org.apache.log4j.Logger;

import com.ares.hadoop.mr.wordcount.MRTest;

// Long, String, String, FlowBean --> LongWritable, Text, Text, FlowBean
public class FlowSumMapper extends Mapper<LongWritable, Text, Text, FlowBean> {
    private static final Logger LOGGER = Logger.getLogger(MRTest.class);

    private String line;
    private int length;
    private final static char separator = '\t';

    private String phoneNum;
    private long upFlow;
    private long downFlow;
    //private long sumFlow;

    private Text text = new Text();
    private FlowBean flowBean = new FlowBean();

    @Override
    protected void map(LongWritable key, Text value,
            Mapper<LongWritable, Text, Text, FlowBean>.Context context)
            throws IOException, InterruptedException {
        line = value.toString();
        String[] fields = StringUtils.split(line, separator);
        length = fields.length;
        // The expected field count (11) assumes the classic phone-traffic log
        // layout; adjust it to your own data.
        if (length != 11) {
            LOGGER.error(key.get() + ", " + line + " LENGTH INVALID, IGNORE...");
            return; // skip malformed records instead of falling through
        }
        phoneNum = fields[1]; // assumed: phone number is the second field
        try {
            upFlow = Long.parseLong(fields[length - 3]);   // assumed: third-from-last field
            downFlow = Long.parseLong(fields[length - 2]); // assumed: second-from-last field
            //sumFlow = upFlow + downFlow;
        } catch (Exception e) {
            LOGGER.error(key.get() + ", " + line + " FLOW DATA INVALID, IGNORE...");
            return; // skip records whose flow fields are not numeric
        }
        flowBean.setPhoneNum(phoneNum);
        flowBean.setUpFlow(upFlow);
        flowBean.setDownFlow(downFlow);
        //flowBean.setSumFlow(sumFlow);
        text.set(phoneNum);
        context.write(text, flowBean);
    }
}
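The field indices above assume a tab-separated, 11-field record with the phone number in field 1 and the up/down byte counts in the third- and second-from-last positions. Under that assumption, a record such as

1363157985066	13726230503	00-FD-07-A4-72-B8:CMCC	120.196.100.82	i02.c.aliimg.com	...	2481	24681	200

would make the mapper emit key "13726230503" with a FlowBean carrying upFlow=2481 and downFlow=24681. If your data differs, adjust the length check and the field indices accordingly.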
3. Reducer Code
package com.ares.hadoop.mr.flowsum;

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class FlowSumReducer extends Reducer<Text, FlowBean, Text, FlowBean> {
    //private static final Logger LOGGER = Logger.getLogger(MRTest.class);

    private FlowBean flowBean = new FlowBean();

    @Override
    protected void reduce(Text key, Iterable<FlowBean> values,
            Reducer<Text, FlowBean, Text, FlowBean>.Context context)
            throws IOException, InterruptedException {
        long upFlowCounter = 0;
        long downFlowCounter = 0;
        // Hadoop reuses the same bean instance while iterating over values,
        // so read the fields out immediately rather than storing references.
        for (FlowBean bean : values) {
            upFlowCounter += bean.getUpFlow();
            downFlowCounter += bean.getDownFlow();
        }
        flowBean.setPhoneNum(key.toString());
        flowBean.setUpFlow(upFlowCounter);
        flowBean.setDownFlow(downFlowCounter);
        flowBean.setSumFlow(upFlowCounter + downFlowCounter);
        context.write(key, flowBean);
    }
}
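Since this reducer's input pair types (Text, FlowBean) match its output pair types, the same class could also serve as a combiner to pre-aggregate flows on the map side. This is optional and not part of the original job setup; it would be a single extra line in the runner:

job.setCombinerClass(FlowSumReducer.class);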
4. Serialization Bean Code
package com.ares.hadoop.mr.flowsum;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.Writable;

public class FlowBean implements Writable {
    private String phoneNum;
    private long upFlow;
    private long downFlow;
    private long sumFlow;

    // Writable deserialization instantiates the bean reflectively,
    // so a public no-arg constructor is required.
    public FlowBean() {
    }

    // public FlowBean(String phoneNum, long upFlow, long downFlow, long sumFlow) {
    //     super();
    //     this.phoneNum = phoneNum;
    //     this.upFlow = upFlow;
    //     this.downFlow = downFlow;
    //     this.sumFlow = sumFlow;
    // }

    public String getPhoneNum() {
        return phoneNum;
    }

    public void setPhoneNum(String phoneNum) {
        this.phoneNum = phoneNum;
    }

    public long getUpFlow() {
        return upFlow;
    }

    public void setUpFlow(long upFlow) {
        this.upFlow = upFlow;
    }

    public long getDownFlow() {
        return downFlow;
    }

    public void setDownFlow(long downFlow) {
        this.downFlow = downFlow;
    }

    public long getSumFlow() {
        return sumFlow;
    }

    public void setSumFlow(long sumFlow) {
        this.sumFlow = sumFlow;
    }

    // Deserialization: fields MUST be read in exactly the order write() produced them.
    @Override
    public void readFields(DataInput in) throws IOException {
        phoneNum = in.readUTF();
        upFlow = in.readLong();
        downFlow = in.readLong();
        sumFlow = in.readLong();
    }

    // Serialization: defines the wire format of the bean.
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeUTF(phoneNum);
        out.writeLong(upFlow);
        out.writeLong(downFlow);
        out.writeLong(sumFlow);
    }

    // toString() determines how TextOutputFormat renders the value in the output file.
    @Override
    public String toString() {
        return "" + upFlow + "\t" + downFlow + "\t" + sumFlow;
    }
}
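Because Writable builds on plain java.io.DataOutput/DataInput, the write/readFields pair can be sanity-checked without a cluster. A minimal round-trip sketch (class name and values are illustrative; it assumes FlowBean is on the classpath):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;

public class FlowBeanRoundTrip {
    public static void main(String[] args) throws Exception {
        FlowBean in = new FlowBean();
        in.setPhoneNum("13726230503"); // illustrative values
        in.setUpFlow(2481);
        in.setDownFlow(24681);
        in.setSumFlow(2481 + 24681);

        // Serialize with write()...
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        in.write(new DataOutputStream(buffer));

        // ...then deserialize into a fresh bean with readFields().
        FlowBean out = new FlowBean();
        out.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));

        // Should print: 2481	24681	27162
        System.out.println(out);
    }
}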
5. TestRunner Code
package com.ares.hadoop.mr.flowsum;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Logger;

public class FlowSumRunner extends Configured implements Tool {
    private static final Logger LOGGER = Logger.getLogger(FlowSumRunner.class);

    @Override
    public int run(String[] args) throws Exception {
        LOGGER.debug("MRTest: MRTest STARTED...");
        if (args.length != 2) {
            LOGGER.error("MRTest: ARGUMENTS ERROR");
            System.exit(-1);
        }

        Configuration conf = new Configuration();
        //FOR Eclipse JVM Debug
        //conf.set("mapreduce.job.jar", "flowsum.jar");
        Job job = Job.getInstance(conf);

        // JOB NAME
        job.setJobName("flowsum");

        // JOB MAPPER & REDUCER
        job.setJarByClass(FlowSumRunner.class);
        job.setMapperClass(FlowSumMapper.class);
        job.setReducerClass(FlowSumReducer.class);

        // MAP & REDUCE
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);
        // MAP
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(FlowBean.class);

        // JOB INPUT & OUTPUT PATH
        //FileInputFormat.addInputPath(job, new Path(args[0]));
        FileInputFormat.setInputPaths(job, args[0]);
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // VERBOSE OUTPUT
        if (job.waitForCompletion(true)) {
            LOGGER.debug("MRTest: MRTest SUCCESSFULLY...");
            return 0;
        } else {
            LOGGER.debug("MRTest: MRTest FAILED...");
            return 1;
        }
    }

    public static void main(String[] args) throws Exception {
        int result = ToolRunner.run(new Configuration(), new FlowSumRunner(), args);
        System.exit(result);
    }
}
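Packaged into a jar, the job would be launched roughly as follows (the jar name and HDFS paths are placeholders):

hadoop jar flowsum.jar com.ares.hadoop.mr.flowsum.FlowSumRunner /flowsum/input /flowsum/output

Note that the output directory must not already exist; FileOutputFormat fails the job if it does.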