Using JobControl to Chain MapReduce Jobs

Code structure:

BeanWritable: the bean used for reading rows from and writing rows to the database
ControlJobTest: job orchestration with JobControl
DBInputFormatApp: imports data from the relational database into HDFS; its Mapper is a static inner class (the Reducer is omitted, as explained in the code)
DBOutputFormatApp: exports structured data from HDFS back into the relational database

The relational database used here is MySQL.

The code is as follows.

BeanWritable.java
package com.zhen.controlJobTest;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.lib.db.DBWritable;

/**
 * JavaBean that must implement both Hadoop's serialization interface
 * Writable and the database serialization interface DBWritable.
 *
 * The official API describes DBInputFormat as:
 *   public class DBInputFormat&lt;T extends DBWritable&gt;
 *       extends InputFormat&lt;LongWritable, T&gt; implements Configurable
 * i.e. the Mapper key is fixed to LongWritable, and the value is a custom
 * JavaBean implementing DBWritable.
 *
 * @author FengZhen
 */
public class BeanWritable implements Writable, DBWritable {

    private int id;
    private String name;
    private double height;

    // Populate the bean from one JDBC row (columns: id, name, height)
    public void readFields(ResultSet resultSet) throws SQLException {
        this.id = resultSet.getInt(1);
        this.name = resultSet.getString(2);
        this.height = resultSet.getDouble(3);
    }

    // Bind the bean's fields to the INSERT statement parameters
    public void write(PreparedStatement preparedStatement) throws SQLException {
        preparedStatement.setInt(1, id);
        preparedStatement.setString(2, name);
        preparedStatement.setDouble(3, height);
    }

    // Hadoop deserialization
    public void readFields(DataInput dataInput) throws IOException {
        this.id = dataInput.readInt();
        this.name = dataInput.readUTF();
        this.height = dataInput.readDouble();
    }

    // Hadoop serialization
    public void write(DataOutput dataOutput) throws IOException {
        dataOutput.writeInt(id);
        dataOutput.writeUTF(name);
        dataOutput.writeDouble(height);
    }

    public void set(int id, String name, double height) {
        this.id = id;
        this.name = name;
        this.height = height;
    }

    @Override
    public String toString() {
        return id + "\t" + name + "\t" + height;
    }
}
DBInputFormatApp.java
package com.zhen.controlJobTest;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

/**
 * Imports MySQL data into HDFS. The job wiring (input format, queries,
 * output path) is configured in ControlJobTest.
 *
 * @author FengZhen
 */
public class DBInputFormatApp {

    /**
     * Map.
     * When the map output key is LongWritable and the value is Text, the
     * reducer can be omitted; the default (identity) reducer also emits
     * LongWritable/Text.
     */
    public static class DBInputMapper extends Mapper<LongWritable, BeanWritable, LongWritable, Text> {

        private LongWritable outputKey;
        private Text outputValue;

        @Override
        protected void setup(Mapper<LongWritable, BeanWritable, LongWritable, Text>.Context context)
                throws IOException, InterruptedException {
            this.outputKey = new LongWritable();
            this.outputValue = new Text();
        }

        @Override
        protected void map(LongWritable key, BeanWritable value,
                Mapper<LongWritable, BeanWritable, LongWritable, Text>.Context context)
                throws IOException, InterruptedException {
            // The key is the record index assigned by DBInputFormat;
            // the value is the bean populated from one table row.
            outputKey.set(key.get());
            outputValue.set(value.toString());
            context.write(outputKey, outputValue);
        }
    }
}
DBOutputFormatApp.java
package com.zhen.controlJobTest;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;

/**
 * Exports HDFS data into MySQL. DBOutputFormat writes structured data from
 * an HDFS path into MySQL. The input lines look like the following: the
 * first column is the key written by the first job, the remaining three
 * columns are the data:
 *   0    1    Enzo    180.66
 *   1    2    Din     170.666
 *
 * @author FengZhen
 */
public class DBOutputFormatApp {

    public static class DBOutputMapper extends Mapper<LongWritable, Text, NullWritable, BeanWritable> {

        private NullWritable outputKey;
        private BeanWritable outputValue;

        @Override
        protected void setup(Mapper<LongWritable, Text, NullWritable, BeanWritable>.Context context)
                throws IOException, InterruptedException {
            this.outputKey = NullWritable.get();
            this.outputValue = new BeanWritable();
        }

        @Override
        protected void map(LongWritable key, Text value,
                Mapper<LongWritable, Text, NullWritable, BeanWritable>.Context context)
                throws IOException, InterruptedException {
            // Counter for records successfully prepared for the database
            final Counter successCounter = context.getCounter("exec", "successfully");
            // Counter for records that could not be parsed
            final Counter failedCounter = context.getCounter("exec", "failed");
            // Parse the structured line
            String[] fields = value.toString().split("\t");
            // The data exported by DBInputFormatApp carries a long key in
            // column 0, so skip it and read the data fields from index 1.
            if (fields.length > 3) {
                int id = Integer.parseInt(fields[1]);
                String name = fields[2];
                double height = Double.parseDouble(fields[3]);
                this.outputValue.set(id, name, height);
                context.write(outputKey, outputValue);
                // Increment by 1 for each record successfully written
                successCounter.increment(1L);
            } else {
                // Increment by 1 for each record that failed to parse
                failedCounter.increment(1L);
            }
        }
    }

    /**
     * DBOutputFormat requires the output key to implement DBWritable.
     */
    public static class DBOutputReducer extends Reducer<NullWritable, BeanWritable, BeanWritable, NullWritable> {

        @Override
        protected void reduce(NullWritable key, Iterable<BeanWritable> values,
                Reducer<NullWritable, BeanWritable, BeanWritable, NullWritable>.Context context)
                throws IOException, InterruptedException {
            for (BeanWritable beanWritable : values) {
                context.write(beanWritable, key);
            }
        }
    }
}
ControlJobTest.java
package com.zhen.controlJobTest;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.db.DBConfiguration;
import org.apache.hadoop.mapreduce.lib.db.DBInputFormat;
import org.apache.hadoop.mapreduce.lib.db.DBOutputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.jobcontrol.ControlledJob;
import org.apache.hadoop.mapreduce.lib.jobcontrol.JobControl;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import com.zhen.controlJobTest.DBInputFormatApp.DBInputMapper;
import com.zhen.controlJobTest.DBOutputFormatApp.DBOutputMapper;
import com.zhen.controlJobTest.DBOutputFormatApp.DBOutputReducer;

/**
 * @author FengZhen
 */
public class ControlJobTest {

    public static void main(String[] args) throws IOException {
        // Job 1: import from MySQL into HDFS
        Configuration configuration1 = new Configuration();
        // JDBC settings for this job
        DBConfiguration.configureDB(configuration1, "com.mysql.jdbc.Driver",
                "jdbc:mysql://localhost:3306/hadoop", "root", "123qwe");
        Job job1 = Job.getInstance(configuration1, DBInputFormatApp.class.getSimpleName());

        job1.setJarByClass(DBInputFormatApp.class);
        job1.setMapperClass(DBInputMapper.class);
        job1.setMapOutputKeyClass(LongWritable.class);
        job1.setMapOutputValueClass(Text.class);
        job1.setOutputKeyClass(LongWritable.class);
        job1.setOutputValueClass(Text.class);

        // Input format for job 1
        job1.setInputFormatClass(DBInputFormat.class);
        // The query that feeds the mapper, plus the count query used for splits
        DBInputFormat.setInput(
                job1,
                BeanWritable.class,
                "select * from people",
                "select count(1) from people");
        FileOutputFormat.setOutputPath(job1, new Path(args[0]));

        // Job 2: export from HDFS into MySQL
        Configuration configuration2 = new Configuration();
        // Configure the database connection right after creating the Configuration
        DBConfiguration.configureDB(configuration2, "com.mysql.jdbc.Driver",
                "jdbc:mysql://localhost:3306/hadoop", "root", "123qwe");
        Job job2 = Job.getInstance(configuration2, DBOutputFormatApp.class.getSimpleName());
        job2.setJarByClass(DBOutputFormatApp.class);
        job2.setMapperClass(DBOutputMapper.class);
        job2.setMapOutputKeyClass(NullWritable.class);
        job2.setMapOutputValueClass(BeanWritable.class);

        job2.setReducerClass(DBOutputReducer.class);
        job2.setOutputFormatClass(DBOutputFormat.class);
        job2.setOutputKeyClass(BeanWritable.class);
        job2.setOutputValueClass(NullWritable.class);

        job2.setInputFormatClass(TextInputFormat.class);
        FileInputFormat.setInputPaths(job2, args[0]);
        // Target table and columns for job 2
        DBOutputFormat.setOutput(job2, "people", new String[]{"id", "name", "height"});

        ControlledJob controlledJob1 = new ControlledJob(configuration1);
        controlledJob1.setJob(job1);

        ControlledJob controlledJob2 = new ControlledJob(configuration2);
        controlledJob2.setJob(job2);

        // Required whenever two jobs have a dependency:
        // job2 will not start until job1 has completed successfully.
        controlledJob2.addDependingJob(controlledJob1);

        JobControl jobControl = new JobControl("groupName");
        jobControl.addJob(controlledJob1);
        jobControl.addJob(controlledJob2);
        // JobControl.run() polls in a loop and does not return on its own,
        // so run it on a background thread and watch allFinished() here.
        Thread jobControlThread = new Thread(jobControl);
        jobControlThread.setDaemon(true);
        jobControlThread.start();
        while (!jobControl.allFinished()) {
            try {
                Thread.sleep(500);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }
        jobControl.stop();
        System.exit(0);
    }
}
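One caveat: allFinished() also returns true when a job in the chain fails. As an optional sketch, JobControl's getFailedJobList can report failures before the System.exit(0) call:

        // Optional sketch: report any jobs that ended in the FAILED state.
        for (ControlledJob failedJob : jobControl.getFailedJobList()) {
            System.err.println("Job failed: " + failedJob.getJobName()
                    + " - " + failedJob.getMessage());
        }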
The MySQL table is defined as follows:

CREATE TABLE `people` (
  `id` int(11) NOT NULL,
  `name` varchar(255) DEFAULT NULL,
  `height` double DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
First, insert some test data.
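For example (the values match the sample lines shown in DBOutputFormatApp's javadoc):

INSERT INTO people (id, name, height) VALUES (1, 'Enzo', 180.66);
INSERT INTO people (id, name, height) VALUES (2, 'Din', 170.666);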
Then package the code into a jar, upload it to the server, and run the job:

hadoop jar /Users/FengZhen/Desktop/Hadoop/other/mapreduce_jar/JobControlTest.jar \
  com.zhen.controlJobTest.ControlJobTest \
  /user/hadoop/mapreduce/mysqlToHdfs/people
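The MySQL JDBC driver also has to be on the classpath, both for the client submitting the jobs and for the task JVMs. Since ControlJobTest does not go through ToolRunner, -libjars is not parsed here; one simple approach (jar name and path are illustrative) is:

# Make the driver visible to the submitting client (jar name illustrative)
export HADOOP_CLASSPATH=/path/to/mysql-connector-java-5.1.38.jar:$HADOOP_CLASSPATH
# The tasks need it too: either copy it into Hadoop's lib directory on each
# node or bundle it inside the job jar's lib/ folder.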
This run consists of two subtasks: one imports the MySQL data into HDFS, the other exports the HDFS data back into MySQL. A simple stand-in MapReduce job would also work for testing the chaining.

When two subtasks have a dependency, you must declare it:

controlledJob2.addDependingJob(controlledJob1);

This states that job2 depends on job1: JobControl only schedules job2 once job1 has completed.