1. Basic Concepts
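
MapReduce ships intermediate key/value pairs across the network during the shuffle, so every custom type used as a key or value must be serializable. Hadoop does not use Java's built-in java.io.Serializable for this; it uses its own, more compact Writable interface: the type implements write(DataOutput) to serialize its fields and readFields(DataInput) to deserialize them, and it must have a no-argument constructor so the framework can create instances by reflection. A type used as a key must additionally implement WritableComparable so it can be sorted. The example below sums per-phone-number upstream/downstream traffic, carrying the flow values in a custom FlowBean.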

2. Mapper Code

package com.ares.hadoop.mr.flowsum;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.util.StringUtils;
import org.apache.log4j.Logger;

import com.ares.hadoop.mr.wordcount.MRTest;

// Long, String, String, FlowBean --> LongWritable, Text, Text, FlowBean
public class FlowSumMapper extends Mapper<LongWritable, Text, Text, FlowBean> {
    private static final Logger LOGGER = Logger.getLogger(MRTest.class);

    private String line;
    private int length;
    private final static char separator = '\t';

    private String phoneNum;
    private long upFlow;
    private long downFlow;
    //private long sumFlow;

    private Text text = new Text();
    private FlowBean flowBean = new FlowBean();

    @Override
    protected void map(LongWritable key, Text value,
            Mapper<LongWritable, Text, Text, FlowBean>.Context context)
            throws IOException, InterruptedException {
        line = value.toString();
        String[] fields = StringUtils.split(line, separator);
        length = fields.length;
        // NOTE: the expected field count (11) is an assumption based on the
        // common flow-log sample data; adjust it to your input format.
        if (length != 11) {
            LOGGER.error(key.get() + ", " + line + " LENGTH INVALID, IGNORE...");
            return; // actually skip the malformed record
        }

        // Assumed layout: phone number in column 2, up/down flow in the
        // 3rd- and 2nd-from-last columns.
        phoneNum = fields[1];
        try {
            upFlow = Long.parseLong(fields[length - 3]);
            downFlow = Long.parseLong(fields[length - 2]);
            //sumFlow = upFlow + downFlow;
        } catch (Exception e) {
            LOGGER.error(key.get() + ", " + line + " FLOW DATA INVALID, IGNORE...");
            return; // actually skip records whose flow columns are not numeric
        }

        flowBean.setPhoneNum(phoneNum);
        flowBean.setUpFlow(upFlow);
        flowBean.setDownFlow(downFlow);
        //flowBean.setSumFlow(sumFlow);

        text.set(phoneNum);
        context.write(text, flowBean);
    }
}
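
A note on the literals above: the expected field count (11) and the positions (fields[1] for the phone number, fields[length - 3] / fields[length - 2] for the up and down flow) are assumptions based on the flow-log sample data commonly used with this exercise, where a record looks roughly like this hypothetical line:

1363157985066	13726230503	00-FD-07-A4-72-B8:CMCC	120.196.100.82	...	2481	24681	200

If your input format differs, adjust the length check and the indices accordingly.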

3. Reducer Code

package com.ares.hadoop.mr.flowsum;

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class FlowSumReducer extends Reducer<Text, FlowBean, Text, FlowBean> {
    //private static final Logger LOGGER = Logger.getLogger(MRTest.class);

    private FlowBean flowBean = new FlowBean();

    @Override
    protected void reduce(Text key, Iterable<FlowBean> values,
            Reducer<Text, FlowBean, Text, FlowBean>.Context context)
            throws IOException, InterruptedException {
        long upFlowCounter = 0;
        long downFlowCounter = 0;

        // Accumulate the up and down flow of every record for this phone number.
        for (FlowBean bean : values) {
            upFlowCounter += bean.getUpFlow();
            downFlowCounter += bean.getDownFlow();
        }

        flowBean.setPhoneNum(key.toString());
        flowBean.setUpFlow(upFlowCounter);
        flowBean.setDownFlow(downFlowCounter);
        flowBean.setSumFlow(upFlowCounter + downFlowCounter);
        context.write(key, flowBean);
    }
}
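
Note that both the mapper and the reducer deliberately reuse a single FlowBean (and Text) instance instead of allocating a new object per record. This is safe because context.write() serializes the current field values immediately, and it avoids garbage-collection pressure on large inputs. The framework applies the same trick to the reducer's values iterable, recycling one FlowBean object across iterations: holding references to those objects across loop rounds would be a bug, but summing their fields as above is fine.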

4. Serializable Bean Code

package com.ares.hadoop.mr.flowsum;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.Writable;

public class FlowBean implements Writable {
    private String phoneNum;
    private long upFlow;
    private long downFlow;
    private long sumFlow;

    // The framework instantiates the bean by reflection, so a no-arg
    // constructor is required.
    public FlowBean() {
    }

    // public FlowBean(String phoneNum, long upFlow, long downFlow, long sumFlow) {
    //     super();
    //     this.phoneNum = phoneNum;
    //     this.upFlow = upFlow;
    //     this.downFlow = downFlow;
    //     this.sumFlow = sumFlow;
    // }

    public String getPhoneNum() {
        return phoneNum;
    }

    public void setPhoneNum(String phoneNum) {
        this.phoneNum = phoneNum;
    }

    public long getUpFlow() {
        return upFlow;
    }

    public void setUpFlow(long upFlow) {
        this.upFlow = upFlow;
    }

    public long getDownFlow() {
        return downFlow;
    }

    public void setDownFlow(long downFlow) {
        this.downFlow = downFlow;
    }

    public long getSumFlow() {
        return sumFlow;
    }

    public void setSumFlow(long sumFlow) {
        this.sumFlow = sumFlow;
    }

    // Deserialization must read the fields in exactly the order write() emits them.
    @Override
    public void readFields(DataInput in) throws IOException {
        phoneNum = in.readUTF();
        upFlow = in.readLong();
        downFlow = in.readLong();
        sumFlow = in.readLong();
    }

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeUTF(phoneNum);
        out.writeLong(upFlow);
        out.writeLong(downFlow);
        out.writeLong(sumFlow);
    }

    // toString() controls how the reducer's output value is rendered by TextOutputFormat.
    @Override
    public String toString() {
        return "" + upFlow + "\t" + downFlow + "\t" + sumFlow;
    }
}
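
Because write() and readFields() must stay in lockstep (same fields, same order, same types), a quick local round trip is a useful sanity check before running a full job. The class below is a minimal sketch for that purpose, not part of the original post; FlowBeanRoundTrip and all its values are hypothetical:

package com.ares.hadoop.mr.flowsum;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;

public class FlowBeanRoundTrip {
    public static void main(String[] args) throws Exception {
        // Fill a bean with made-up values.
        FlowBean before = new FlowBean();
        before.setPhoneNum("13500000000"); // hypothetical phone number
        before.setUpFlow(1024L);
        before.setDownFlow(4096L);
        before.setSumFlow(5120L);

        // Serialize with write()...
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        before.write(new DataOutputStream(buffer));

        // ...and read it back with readFields().
        FlowBean after = new FlowBean();
        after.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));

        // Should print the three flow values separated by tabs (see toString()).
        System.out.println(after);
    }
}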

5. TestRunner Code

package com.ares.hadoop.mr.flowsum;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Logger;

public class FlowSumRunner extends Configured implements Tool {
    private static final Logger LOGGER = Logger.getLogger(FlowSumRunner.class);

    @Override
    public int run(String[] args) throws Exception {
        LOGGER.debug("MRTest: MRTest STARTED...");

        // Expect exactly two arguments: the input path and the output path.
        if (args.length != 2) {
            LOGGER.error("MRTest: ARGUMENTS ERROR");
            System.exit(-1);
        }

        Configuration conf = new Configuration();
        //FOR Eclipse JVM Debug
        //conf.set("mapreduce.job.jar", "flowsum.jar");
        Job job = Job.getInstance(conf);

        // JOB NAME
        job.setJobName("flowsum");

        // JOB MAPPER & REDUCER
        job.setJarByClass(FlowSumRunner.class);
        job.setMapperClass(FlowSumMapper.class);
        job.setReducerClass(FlowSumReducer.class);

        // MAP & REDUCE
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);
        // MAP
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(FlowBean.class);

        // JOB INPUT & OUTPUT PATH
        //FileInputFormat.addInputPath(job, new Path(args[0]));
        FileInputFormat.setInputPaths(job, args[0]);
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // VERBOSE OUTPUT
        if (job.waitForCompletion(true)) {
            LOGGER.debug("MRTest: MRTest SUCCEEDED...");
            return 0;
        } else {
            LOGGER.debug("MRTest: MRTest FAILED...");
            return -1;
        }
    }

    public static void main(String[] args) throws Exception {
        int result = ToolRunner.run(new Configuration(), new FlowSumRunner(), args);
        System.exit(result);
    }
}
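
To run the job, package these classes into a jar and launch it with the hadoop command; the jar name and paths below are only placeholders:

hadoop jar flowsum.jar com.ares.hadoop.mr.flowsum.FlowSumRunner /flow/input /flow/output

Because the driver goes through ToolRunner, generic options such as -D mapreduce.job.reduces=2 are parsed from the command line before the remaining arguments reach run().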

References:

http://www.cnblogs.com/robert-blue/p/4157768.html

http://www.cnblogs.com/qlee/archive/2011/05/18/2049610.html

http://blog.163.com/lzm07@126/blog/static/25705468201331611857190/

http://blog.csdn.net/lastsweetop/article/details/9193907
