Hadoop 2.2.0 MapReduce: sum and then sort
The JavaBean must implement the WritableComparable interface, providing serialization (write), deserialization (readFields), and comparison (compareTo) methods.
package com.my.hadoop.mapreduce.sort;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.WritableComparable;

public class InfoBean implements WritableComparable<InfoBean> {

    private String account;
    private double income;
    private double expences;
    private double surplus;

    public void set(String account, double income, double expences) {
        this.account = account;
        this.income = income;
        this.expences = expences;
        this.surplus = income - expences;
    }

    @Override
    public String toString() {
        return income + "\t" + expences + "\t" + surplus;
    }

    // Deserialization: fields must be read in the same order they were written.
    @Override
    public void readFields(DataInput in) throws IOException {
        this.account = in.readUTF();
        this.income = in.readDouble();
        this.expences = in.readDouble();
        this.surplus = in.readDouble();
    }

    // Serialization.
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeUTF(this.account);
        out.writeDouble(this.income);
        out.writeDouble(this.expences);
        out.writeDouble(this.surplus);
    }

    // Sort by income in descending order; break ties by ascending expences.
    @Override
    public int compareTo(InfoBean o) {
        if (this.income == o.getIncome()) {
            return this.expences > o.getExpences() ? 1 : -1;
        } else {
            return this.income > o.getIncome() ? -1 : 1;
        }
    }

    public String getAccount() {
        return account;
    }

    public void setAccount(String account) {
        this.account = account;
    }

    public double getIncome() {
        return income;
    }

    public void setIncome(double income) {
        this.income = income;
    }

    public double getExpences() {
        return expences;
    }

    public void setExpences(double expences) {
        this.expences = expences;
    }

    public double getSurplus() {
        return surplus;
    }

    public void setSurplus(double surplus) {
        this.surplus = surplus;
    }
}
Sum first. SumStep totals each account's income and expenses. The mapper expects tab-separated input lines of account, income, and expenses; a hypothetical sample is shown below.
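The original post does not show its input file; the layout below is only inferred from how SumMap splits each line on tabs, and the account names and amounts are purely hypothetical:

zhangsan    6000    3000
lisi        2000    2500
zhangsan    3000    1500
lisi        5000    1000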
package com.my.hadoop.mapreduce.sort;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class SumStep {

    public static class SumMap extends Mapper<LongWritable, Text, Text, InfoBean> {
        private Text k = new Text();
        private InfoBean v = new InfoBean();

        // Parse each tab-separated line: account, income, expenses.
        @Override
        public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            String[] fields = value.toString().split("\t");
            String account = fields[0];
            double in = Double.parseDouble(fields[1]);
            double out = Double.parseDouble(fields[2]);
            k.set(account);
            v.set(account, in, out);
            context.write(k, v);
        }
    }

    public static class SumReduce extends Reducer<Text, InfoBean, Text, InfoBean> {
        private InfoBean v = new InfoBean();

        // Accumulate income and expenses for each account.
        @Override
        public void reduce(Text key, Iterable<InfoBean> values, Context context) throws IOException, InterruptedException {
            double in_sum = 0;
            double out_sum = 0;
            for (InfoBean bean : values) {
                in_sum += bean.getIncome();
                out_sum += bean.getExpences();
            }
            // The account is already carried by the key, so the bean's account field is left empty.
            v.set("", in_sum, out_sum);
            context.write(key, v);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, SumStep.class.getSimpleName());
        job.setJarByClass(SumStep.class);

        FileInputFormat.setInputPaths(job, new Path(args[0]));
        job.setMapperClass(SumMap.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(InfoBean.class);

        job.setReducerClass(SumReduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(InfoBean.class);
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
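With the default TextOutputFormat, each line that SumStep writes is the Text key, a tab, and InfoBean.toString(), i.e. four tab-separated columns: account, total income, total expenses, surplus. SortStep's mapper re-parses only the first three columns and recomputes the surplus, so the output of the first job can be fed directly into the second.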
Then sort. SortStep reads SumStep's output and emits the InfoBean as the map output key, so the shuffle phase sorts records by compareTo (income descending, ties broken by expences). With the default single reducer the final output is globally sorted; with more reducers, each reducer's output is only sorted within itself.
package com.my.hadoop.mapreduce.sort;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class SortStep {

    public static class SortMap extends Mapper<LongWritable, Text, InfoBean, NullWritable> {
        private InfoBean k = new InfoBean();

        // The bean itself is the map output key, so the framework sorts the
        // records by InfoBean.compareTo during the shuffle.
        @Override
        public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            System.out.println("====" + value.toString() + "===="); // debug output
            String[] fields = value.toString().split("\t");
            String account = fields[0];
            double in = Double.parseDouble(fields[1]);
            double out = Double.parseDouble(fields[2]);
            k.set(account, in, out);
            context.write(k, NullWritable.get());
        }
    }

    public static class SortReduce extends Reducer<InfoBean, NullWritable, Text, InfoBean> {
        private Text k = new Text();

        // Keys arrive already sorted; just write them back out as account -> bean.
        @Override
        public void reduce(InfoBean bean, Iterable<NullWritable> values, Context context) throws IOException, InterruptedException {
            k.set(bean.getAccount());
            context.write(k, bean);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, SortStep.class.getSimpleName());
        job.setJarByClass(SortStep.class);

        FileInputFormat.setInputPaths(job, new Path(args[0]));
        job.setMapperClass(SortMap.class);
        job.setMapOutputKeyClass(InfoBean.class);
        job.setMapOutputValueClass(NullWritable.class);

        job.setReducerClass(SortReduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(InfoBean.class);
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
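The two jobs can also be chained in a single driver so the sum output feeds the sort input directly, instead of being submitted one after the other by hand. The sketch below is only an illustration under that assumption: the class name SumThenSortDriver and the three-argument path layout (raw input, intermediate sum directory, final sorted output) are not part of the original post.

package com.my.hadoop.mapreduce.sort;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

// Hypothetical driver chaining the two steps:
// args[0] = raw input, args[1] = intermediate (sum) output, args[2] = final (sorted) output.
public class SumThenSortDriver {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();

        // Step 1: sum income and expenses per account.
        Job sumJob = Job.getInstance(conf, "sum-step");
        sumJob.setJarByClass(SumThenSortDriver.class);
        sumJob.setMapperClass(SumStep.SumMap.class);
        sumJob.setReducerClass(SumStep.SumReduce.class);
        sumJob.setMapOutputKeyClass(Text.class);
        sumJob.setMapOutputValueClass(InfoBean.class);
        sumJob.setOutputKeyClass(Text.class);
        sumJob.setOutputValueClass(InfoBean.class);
        FileInputFormat.setInputPaths(sumJob, new Path(args[0]));
        FileOutputFormat.setOutputPath(sumJob, new Path(args[1]));
        if (!sumJob.waitForCompletion(true)) {
            System.exit(1); // stop if the first step fails
        }

        // Step 2: sort the summed records by income (descending), then expences.
        Job sortJob = Job.getInstance(conf, "sort-step");
        sortJob.setJarByClass(SumThenSortDriver.class);
        sortJob.setMapperClass(SortStep.SortMap.class);
        sortJob.setReducerClass(SortStep.SortReduce.class);
        sortJob.setMapOutputKeyClass(InfoBean.class);
        sortJob.setMapOutputValueClass(NullWritable.class);
        sortJob.setOutputKeyClass(Text.class);
        sortJob.setOutputValueClass(InfoBean.class);
        FileInputFormat.setInputPaths(sortJob, new Path(args[1]));
        FileOutputFormat.setOutputPath(sortJob, new Path(args[2]));
        System.exit(sortJob.waitForCompletion(true) ? 0 : 1);
    }
}

Packaged into a jar, it would be run with something like "hadoop jar sort-demo.jar com.my.hadoop.mapreduce.sort.SumThenSortDriver /trade/in /trade/sum /trade/sorted" (jar name and HDFS paths here are placeholders).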