/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
 */
package org.apache.hadoop.examples;

import java.io.IOException;
import java.net.URI;
import java.util.*;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.mapreduce.filecache.DistributedCache;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.ClusterStatus;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.apache.hadoop.mapreduce.lib.partition.InputSampler;
import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

/**
* This is the trivial map/reduce program that does absolutely nothing
* other than use the framework to fragment and sort the input values.
*
* To run: bin/hadoop jar build/hadoop-examples.jar sort
* [-r <i>reduces</i>]
* [-inFormat <i>input format class</i>]
* [-outFormat <i>output format class</i>]
* [-outKey <i>output key class</i>]
* [-outValue <i>output value class</i>]
* [-totalOrder <i>pcnt</i> <i>num samples</i> <i>max splits</i>]
* <i>in-dir</i> <i>out-dir</i>
*/
public class Sort<K,V> extends Configured implements Tool {
public static final String REDUCES_PER_HOST =
"mapreduce.sort.reducesperhost";
  private Job job = null;

  static int printUsage() {
System.out.println("sort [-r <reduces>] " +
"[-inFormat <input format class>] " +
"[-outFormat <output format class>] " +
"[-outKey <output key class>] " +
"[-outValue <output value class>] " +
"[-totalOrder <pcnt> <num samples> <max splits>] " +
"<input> <output>");
ToolRunner.printGenericCommandUsage(System.out);
return 2;
  }

  /**
* The main driver for sort program.
* Invoke this method to submit the map/reduce job.
 * @throws IOException When there are communication problems with the
 * job tracker.
*/
  public int run(String[] args) throws Exception {
    Configuration conf = getConf();
JobClient client = new JobClient(conf);
ClusterStatus cluster = client.getClusterStatus();
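    // By default, use 90% of the cluster's total reduce-task capacity.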
int num_reduces = (int) (cluster.getMaxReduceTasks() * 0.9);
String sort_reduces = conf.get(REDUCES_PER_HOST);
if (sort_reduces != null) {
num_reduces = cluster.getTaskTrackers() *
Integer.parseInt(sort_reduces);
}
Class<? extends InputFormat> inputFormatClass =
SequenceFileInputFormat.class;
Class<? extends OutputFormat> outputFormatClass =
SequenceFileOutputFormat.class;
Class<? extends WritableComparable> outputKeyClass = BytesWritable.class;
Class<? extends Writable> outputValueClass = BytesWritable.class;
List<String> otherArgs = new ArrayList<String>();
InputSampler.Sampler<K,V> sampler = null;
for(int i=0; i < args.length; ++i) {
try {
if ("-r".equals(args[i])) {
num_reduces = Integer.parseInt(args[++i]);
} else if ("-inFormat".equals(args[i])) {
inputFormatClass =
Class.forName(args[++i]).asSubclass(InputFormat.class);
} else if ("-outFormat".equals(args[i])) {
outputFormatClass =
Class.forName(args[++i]).asSubclass(OutputFormat.class);
} else if ("-outKey".equals(args[i])) {
outputKeyClass =
Class.forName(args[++i]).asSubclass(WritableComparable.class);
} else if ("-outValue".equals(args[i])) {
outputValueClass =
Class.forName(args[++i]).asSubclass(Writable.class);
} else if ("-totalOrder".equals(args[i])) {
double pcnt = Double.parseDouble(args[++i]);
int numSamples = Integer.parseInt(args[++i]);
int maxSplits = Integer.parseInt(args[++i]);
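          // A non-positive max-splits value means "no limit": the sampler may
          // read from as many splits as it needs.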
if (0 >= maxSplits) maxSplits = Integer.MAX_VALUE;
sampler =
new InputSampler.RandomSampler<K,V>(pcnt, numSamples, maxSplits);
} else {
otherArgs.add(args[i]);
}
} catch (NumberFormatException except) {
System.out.println("ERROR: Integer expected instead of " + args[i]);
return printUsage();
} catch (ArrayIndexOutOfBoundsException except) {
System.out.println("ERROR: Required parameter missing from " +
args[i-1]);
        return printUsage(); // prints usage and returns a non-zero status
}
}
// Set user-supplied (possibly default) job configs
job = Job.getInstance(conf);
job.setJobName("sorter");
    job.setJarByClass(Sort.class);
    job.setMapperClass(Mapper.class);
    job.setReducerClass(Reducer.class);
    job.setNumReduceTasks(num_reduces);
    job.setInputFormatClass(inputFormatClass);
    job.setOutputFormatClass(outputFormatClass);
    job.setOutputKeyClass(outputKeyClass);
    job.setOutputValueClass(outputValueClass);

    // Make sure there are exactly 2 parameters left.
if (otherArgs.size() != 2) {
System.out.println("ERROR: Wrong number of parameters: " +
otherArgs.size() + " instead of 2.");
return printUsage();
}
FileInputFormat.setInputPaths(job, otherArgs.get(0));
    FileOutputFormat.setOutputPath(job, new Path(otherArgs.get(1)));

    if (sampler != null) {
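      // The sampler computes split points from the input and writes them to a
      // partition file; each task reads that file so TotalOrderPartitioner can
      // route keys to reducers in globally sorted order.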
System.out.println("Sampling input to effect total-order sort...");
job.setPartitionerClass(TotalOrderPartitioner.class);
Path inputDir = FileInputFormat.getInputPaths(job)[0];
inputDir = inputDir.makeQualified(inputDir.getFileSystem(conf));
Path partitionFile = new Path(inputDir, "_sortPartitioning");
TotalOrderPartitioner.setPartitionFile(conf, partitionFile);
InputSampler.<K,V>writePartitionFile(job, sampler);
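      // The "#_sortPartitioning" fragment names the symlink that the
      // distributed cache creates in each task's working directory.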
URI partitionUri = new URI(partitionFile.toString() +
"#" + "_sortPartitioning");
DistributedCache.addCacheFile(partitionUri, conf);
} System.out.println("Running on " +
cluster.getTaskTrackers() +
" nodes to sort from " +
FileInputFormat.getInputPaths(job)[0] + " into " +
FileOutputFormat.getOutputPath(job) +
" with " + num_reduces + " reduces.");
Date startTime = new Date();
System.out.println("Job started: " + startTime);
int ret = job.waitForCompletion(true) ? 0 : 1;
    Date endTime = new Date();
    System.out.println("Job ended: " + endTime);
    System.out.println("The job took " +
        (endTime.getTime() - startTime.getTime()) / 1000 + " seconds.");
return ret;
  }

  public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(), new Sort(), args);
System.exit(res);
  }

  /**
* Get the last job that was run using this instance.
* @return the results of the last job that was run
*/
public Job getResult() {
return job;
}
}

My first impression after reading this source: when will the MapReduce code I write be this clean and this good...
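For reference, here is a hypothetical invocation (the examples jar name and HDFS paths are made up for illustration), using 24 reduces and sampling 10% of the input keys, up to 10000 samples, reading at most 10 splits:

bin/hadoop jar hadoop-mapreduce-examples.jar sort -r 24 -totalOrder 0.1 10000 10 /user/demo/sort-in /user/demo/sort-out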
