When people learn Hadoop MapReduce, the first program is almost always WordCount, so here I'd like to share my second MapReduce program instead. For programmers, code is sometimes one of the best ways to communicate.

package com.zhongxin.mr;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

import java.io.IOException;
import java.math.BigDecimal;
import java.util.regex.Pattern;

/**
 * Per-user received principal and interest (用户已收本息).
 * Created by DingYS on 2017/11/21.
 */
public class UserReceiveAmount {

    public static class Map extends Mapper<LongWritable, Text, Text, Text> {
        private Text outKey = new Text();
        private Text outValue = new Text();
        private Pattern pattern = Pattern.compile(",");

        @Override
        public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            // interest received
            BigDecimal interest = new BigDecimal(0);
            // principal received
            BigDecimal capital = new BigDecimal(0);
            String[] splits = pattern.split(String.valueOf(value));
            String ownerType = splits[2];
            String fundsDirection = splits[6];
            String tradeType = splits[5];
            String penaltyAmount = splits[15];
            String tradeAmount = splits[7];
            String tradeShare = splits[8];
            String ownerCustomNo = splits[1];
            if ("USER".equals(ownerType) && "INCR".equals(fundsDirection) && !Pattern.matches("CURRENT_.*?", tradeType)) {
                if ("INTEREST".equals(tradeType) && ("null".equals(penaltyAmount) || "".equals(penaltyAmount) || "0.00".equals(penaltyAmount))) {
                    interest = new BigDecimal(Double.parseDouble(tradeAmount)).setScale(2, BigDecimal.ROUND_HALF_UP);
                } else {
                    interest = new BigDecimal(Double.parseDouble(tradeAmount)).subtract(new BigDecimal(Double.parseDouble(tradeShare))).setScale(2, BigDecimal.ROUND_HALF_UP);
                    capital = new BigDecimal(Double.parseDouble(tradeShare)).setScale(2, BigDecimal.ROUND_HALF_UP);
                }
                outKey.set(ownerCustomNo);
                // emit "interest,capital" keyed by the customer number
                outValue.set(String.valueOf(interest) + "," + String.valueOf(capital));
                context.write(outKey, outValue);
            }
        }
    }

    public static class Reduce extends Reducer<Text, Text, Text, Text> {
        @Override
        public void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
            Text outValue = new Text();
            BigDecimal interest = new BigDecimal(0);
            BigDecimal capital = new BigDecimal(0);
            // sum interest and capital across all records of the same customer
            for (Text value : values) {
                String[] splits = value.toString().split(",");
                interest = interest.add(new BigDecimal(Double.parseDouble(splits[0]))).setScale(2, BigDecimal.ROUND_HALF_UP);
                capital = capital.add(new BigDecimal(Double.parseDouble(splits[1]))).setScale(2, BigDecimal.ROUND_HALF_UP);
            }
            outValue.set(interest + "\t" + capital);
            context.write(key, outValue);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration config = new Configuration();
        Job job = Job.getInstance(config);
        job.setJobName("userReceiveAmount");
        job.setJarByClass(UserReceiveAmount.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);
        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        job.waitForCompletion(true);
    }
}
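Since the reducer joins interest and capital with a tab and TextOutputFormat writes another tab between key and value, every line of the part-r-* output files has three columns: customer number, total interest received, total principal received. A made-up sample of what that output looks like:

C1001	5.00	100.00
C1002	12.34	2000.00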

The key to understanding this MapReduce program is: map() processes one line of input at a time, and all records with the same key are routed to the same reduce call.
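For instance, take a purely made-up record for customer C1001 whose ownerType is USER, fundsDirection is INCR, tradeType is neither INTEREST nor a CURRENT_* type, tradeAmount is 105.00 and tradeShare is 100.00: the map emits the pair (C1001, "5.00,100.00"). Every pair keyed by C1001, no matter which map task produced it, reaches the same reduce() call, which sums the pieces into that customer's total interest and total principal.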

The version above writes the aggregated results to HDFS; below is a version that writes them into HBase instead. Here is the code:

package com.zhongxin.mr;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

import java.io.IOException;
import java.math.BigDecimal;
import java.util.regex.Pattern;

/**
 * Per-user received principal and interest (用户已收本息), written to HBase.
 * Created by DingYS on 2017/11/21.
 */
public class UserReceiveAmount {

    public static class Map extends Mapper<LongWritable, Text, Text, Text> {
        private Text outKey = new Text();
        private Text outValue = new Text();
        private Pattern pattern = Pattern.compile(",");

        @Override
        public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            // interest received
            BigDecimal interest = new BigDecimal(0);
            // principal received
            BigDecimal capital = new BigDecimal(0);
            String[] splits = pattern.split(String.valueOf(value));
            String ownerType = splits[2];
            String fundsDirection = splits[6];
            String tradeType = splits[5];
            String penaltyAmount = splits[15];
            String tradeAmount = splits[7];
            String tradeShare = splits[8];
            String ownerCustomNo = splits[1];
            if ("USER".equals(ownerType) && "INCR".equals(fundsDirection) && !Pattern.matches("CURRENT_.*?", tradeType)) {
                if ("INTEREST".equals(tradeType) && ("null".equals(penaltyAmount) || "".equals(penaltyAmount) || "0.00".equals(penaltyAmount))) {
                    interest = new BigDecimal(Double.parseDouble(tradeAmount)).setScale(2, BigDecimal.ROUND_HALF_UP);
                } else {
                    interest = new BigDecimal(Double.parseDouble(tradeAmount)).subtract(new BigDecimal(Double.parseDouble(tradeShare))).setScale(2, BigDecimal.ROUND_HALF_UP);
                    capital = new BigDecimal(Double.parseDouble(tradeShare)).setScale(2, BigDecimal.ROUND_HALF_UP);
                }
                outKey.set(ownerCustomNo);
                // emit "interest,capital" keyed by the customer number
                outValue.set(String.valueOf(interest) + "," + String.valueOf(capital));
                context.write(outKey, outValue);
            }
        }
    }

    public static class Reduce extends TableReducer<Text, Text, ImmutableBytesWritable> {
        private ImmutableBytesWritable k = new ImmutableBytesWritable();

        @Override
        public void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
            BigDecimal interest = new BigDecimal(0);
            BigDecimal capital = new BigDecimal(0);
            for (Text value : values) {
                String[] splits = value.toString().split(",");
                interest = interest.add(new BigDecimal(Double.parseDouble(splits[0]))).setScale(2, BigDecimal.ROUND_HALF_UP);
                capital = capital.add(new BigDecimal(Double.parseDouble(splits[1]))).setScale(2, BigDecimal.ROUND_HALF_UP);
            }
            String family = "info";
            // the customer number (the reduce key) becomes the rowkey
            Put put = new Put(String.valueOf(key).getBytes());
            put.addColumn(family.getBytes(), "interest".getBytes(), String.valueOf(interest).getBytes());
            put.addColumn(family.getBytes(), "capital".getBytes(), String.valueOf(capital).getBytes());
            k.set(key.getBytes());
            context.write(k, put);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration config = HBaseConfiguration.create();
        Job job = Job.getInstance(config, "userReceiveAmount");
        job.setJarByClass(UserReceiveAmount.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        job.setMapperClass(Map.class);
        // wires up the reducer, the TableOutputFormat and the target table "userReceiveAmount"
        TableMapReduceUtil.initTableReducerJob("userReceiveAmount", Reduce.class, job);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(ImmutableBytesWritable.class);
        job.setOutputValueClass(Mutation.class);
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
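One thing the job does not do is create the target table, so "userReceiveAmount" with the column family "info" (both names come from the code above) has to exist before it runs, for example via create 'userReceiveAmount','info' in the hbase shell. A minimal sketch of doing the same thing with the HBase 1.x client API, assuming the cluster settings are picked up from hbase-site.xml on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CreateUserReceiveAmountTable {
    public static void main(String[] args) throws Exception {
        Configuration config = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(config);
             Admin admin = connection.getAdmin()) {
            TableName tableName = TableName.valueOf("userReceiveAmount");
            if (!admin.tableExists(tableName)) {
                // single column family "info", matching the Put calls in the reducer
                HTableDescriptor descriptor = new HTableDescriptor(tableName);
                descriptor.addFamily(new HColumnDescriptor("info"));
                admin.createTable(descriptor);
            }
        }
    }
}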

Note: the rowkey should be built from a String. If what you have is a Text, call Text.toString() on it first (Text overrides toString()); in my tests String.valueOf() works just as well:

Put put = new Put(rowkey.getBytes());
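A small sketch of the Text case (here rowkey stands for the reducer's Text key; calling getBytes() directly on the Text is risky because it returns the backing array, which can be longer than the actual content):

Put put1 = new Put(rowkey.toString().getBytes());        // Text -> String -> bytes
Put put2 = new Put(String.valueOf(rowkey).getBytes());   // String.valueOf() just calls toString()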

  
