Program source code

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class WordCount {

    public static class WordCountMap extends
            Mapper<LongWritable, Text, Text, IntWritable> {

        private final IntWritable one = new IntWritable(1); // output value: 1
        private Text word = new Text();                     // output key: the word

        // Consumes the <k1, v1> pairs produced by TextInputFormat and emits <k2, v2>.
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String line = value.toString();                    // one line of the input text
            StringTokenizer token = new StringTokenizer(line); // split the line into words on whitespace
            while (token.hasMoreTokens()) {
                word.set(token.nextToken()); // each word becomes the key
                context.write(word, one);    // emit <word, 1> for the reducer
            }
        }
    }

    public static class WordCountReduce extends
            Reducer<Text, IntWritable, Text, IntWritable> {

        public void reduce(Text key, Iterable<IntWritable> values,
                Context context) throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            context.write(key, new IntWritable(sum));
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = new Job(conf);
        job.setJarByClass(WordCount.class);
        job.setJobName("wordcount");

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        job.setMapperClass(WordCountMap.class);
        job.setReducerClass(WordCountReduce.class);

        job.setInputFormatClass(TextInputFormat.class); // turns the input files into key/value pairs for the map phase
        job.setOutputFormatClass(TextOutputFormat.class);

        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        job.waitForCompletion(true);
    }
}

1 Compile the source code

javac -classpath /opt/hadoop-1.2.1/hadoop-core-1.2.1.jar:/opt/hadoop-1.2.1/lib/commons-cli-1.2.jar -d ./word_count_class/ WordCount.java
This compiles the source into class files and puts them in the word_count_class directory under the current folder; note that you have to create that directory first.
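For example, from the directory that holds WordCount.java:

mkdir word_count_class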

2 Package the classes into a jar

Change into the word_count_class directory and package everything in it:

jar -cvf wordcount.jar  *
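To double-check what was packaged before submitting it (a standard jar option, not part of the original steps):

jar -tf wordcount.jar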

3 Upload the input files

First create a directory in HDFS to hold the input files for this job:

hadoop fs -mkdir input_wordcount

Upload all the text files under the local input directory to the input_wordcount directory in HDFS:

hadoop fs -put input/* input_wordcount/

Note: do not create the output directory before running the job; Hadoop creates it itself and aborts if it already exists.
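If an earlier attempt already created output_wordcount, delete it before re-running; on Hadoop 1.x that would be:

hadoop fs -rmr output_wordcount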

4 Submit the jar and run it

hadoop jar word_count_class/wordcount.jar input_wordcount output_wordcount

5 View the results

List the job's output directory:

hadoop fs -ls output_wordcount

Print the job's output:

hadoop fs -cat output_wordcount/part-r-00000
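Each line of part-r-00000 is a word and its count separated by a tab; for instance, if the input contained Hello twice, the file would hold a line like this (illustrative, since the input for this run is not shown):

Hello	2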



Version 2: the program as I actually ran it

Map program

package com.zln.chapter03;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

import java.io.IOException;
import java.util.StringTokenizer;

/**
 * Created by sherry on 15-7-12.
 */
public class WordCountMap extends MapReduceBase implements Mapper<LongWritable, Text, Text, IntWritable> {
    private final static IntWritable one = new IntWritable(1); // a count of 1 for each word occurrence
    private Text word = new Text();

    @Override
    public void map(LongWritable longWritable, Text text, OutputCollector<Text, IntWritable> outputCollector, Reporter reporter) throws IOException {
        String line = text.toString();
        StringTokenizer tokenizer = new StringTokenizer(line); // split the line into words
        while (tokenizer.hasMoreTokens()) {
            word.set(tokenizer.nextToken());
            outputCollector.collect(word, one);
        }
    }
}
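The mapper's splitting behavior comes entirely from StringTokenizer, whose default delimiters are spaces, tabs, and newlines. A standalone sketch (the class name is my own, not part of the post) showing what one input line turns into:

import java.util.StringTokenizer;

public class TokenizeDemo {
    public static void main(String[] args) {
        // Same default delimiters the mapper relies on: space, tab, newline.
        StringTokenizer tokenizer = new StringTokenizer("Hello Word By Word");
        while (tokenizer.hasMoreTokens()) {
            System.out.println(tokenizer.nextToken()); // prints Hello, Word, By, Word
        }
    }
}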

Reduce program

package com.zln.chapter03;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;

import java.io.IOException;
import java.util.Iterator;

/**
 * Created by sherry on 15-7-12.
 */
public class WordCountReduce extends MapReduceBase implements Reducer<Text, IntWritable, Text, IntWritable> {
    @Override
    public void reduce(Text text, Iterator<IntWritable> iterator, OutputCollector<Text, IntWritable> outputCollector, Reporter reporter) throws IOException {
        int sum = 0;
        while (iterator.hasNext()) {
            sum += iterator.next().get();
        }
        outputCollector.collect(text, new IntWritable(sum));
    }
}
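Because the old-API OutputCollector is just an interface, the reduce logic can be exercised without a cluster. A minimal sketch (class name and input values are my own, for illustration only):

package com.zln.chapter03;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.OutputCollector;

import java.io.IOException;
import java.util.Arrays;

public class WordCountReduceCheck {
    public static void main(String[] args) throws IOException {
        // Throwaway collector that just prints whatever the reducer emits.
        OutputCollector<Text, IntWritable> collector = new OutputCollector<Text, IntWritable>() {
            public void collect(Text key, IntWritable value) {
                System.out.println(key + "\t" + value); // expect: Hello	3
            }
        };
        // Reporter is unused by this reducer, so null is fine here.
        new WordCountReduce().reduce(new Text("Hello"),
                Arrays.asList(new IntWritable(1), new IntWritable(1), new IntWritable(1)).iterator(),
                collector, null);
    }
}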

Main function

package com.zln.chapter03;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.*;

import java.io.IOException;

/**
 * Created by sherry on 15-7-12.
 */
public class WordCount {
    public static void main(String[] args) throws IOException {
        JobConf conf = new JobConf(WordCount.class);
        conf.setJobName("wordCount");

        // Output key/value types
        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(IntWritable.class);

        // Map and reduce classes
        conf.setMapperClass(WordCountMap.class);
        conf.setReducerClass(WordCountReduce.class);

        // Input format
        conf.setInputFormat(TextInputFormat.class);
        // Output format
        conf.setOutputFormat(TextOutputFormat.class);

        FileInputFormat.setInputPaths(conf, new Path(args[0]));
        FileOutputFormat.setOutputPath(conf, new Path(args[1]));

        JobClient.runJob(conf);
    }
}

Prepare the input files

file1

Hello Word By Word
Hello Word By zln

file2

Hello Hadoop
Hello GoodBye

Put both files in the same directory: /home/sherry/IdeaProjects/Hadoop/WordCount/输入文件准备

Compile the classes and package them into a jar

I build it with IDEA. Be careful not to forget to specify the main class.
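If you ever build it outside IDEA, jar's e option writes the Main-Class entry into the manifest for you; a sketch, where classes_dir is a placeholder for wherever the compiled classes ended up:

jar cvfe WordCount.jar com.zln.chapter03.WordCount -C classes_dir .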

Upload the input files

root@sherry:/opt/hadoop-1.2.1# hadoop fs -mkdir /user/root/zln/WordCount/InputFiles
root@sherry:/opt/hadoop-1.2.1# hadoop fs -put /home/sherry/IdeaProjects/Hadoop/WordCount/输入文件准备/* /user/root/zln/WordCount/InputFiles

Submit the jar and run it

root@sherry:/opt/hadoop-1.2.1# hadoop jar /home/sherry/IdeaProjects/Hadoop/out/artifacts/WordCount_jar/WordCount.jar /user/root/zln/WordCount/InputFiles /user/root/zln/WordCount/OutputFiles

View the results

root@sherry:/opt/hadoop-1.2.1# hadoop fs -ls /user/root/zln/WordCount/OutputFiles
root@sherry:/opt/hadoop-1.2.1# hadoop fs -text /user/root/zln/WordCount/OutputFiles/part-00000
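With file1 and file2 above as input the job is deterministic, so the output should contain exactly the following (keys come out of the reduce sorted, and Text compares raw bytes, so uppercase words sort first):

By	2
GoodBye	1
Hadoop	1
Hello	4
Word	3
zln	1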


Version 3: rewriting the Map, Reduce, and main functions with the new API

Map

package com.zln.chapter03;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;
import java.util.StringTokenizer;

/**
 * Created by sherry on 15-7-12.
 */
public class WordCountMap extends Mapper<LongWritable, Text, Text, IntWritable> {
    private final static IntWritable one = new IntWritable(1); // a count of 1 for each word occurrence
    private Text word = new Text();

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        String line = value.toString();
        StringTokenizer tokenizer = new StringTokenizer(line); // split the line into words
        while (tokenizer.hasMoreTokens()) {
            word.set(tokenizer.nextToken());
            context.write(word, one);
        }
    }
}

Reduce

package com.zln.chapter03;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

/**
 * Created by sherry on 15-7-12.
 */
public class WordCountReduce extends Reducer<Text, IntWritable, Text, IntWritable> {
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        int sum = 0;
        for (IntWritable intWritable : values) {
            sum += intWritable.get();
        }
        context.write(key, new IntWritable(sum));
    }
}
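Since this reduce is a pure sum, the same class would also work as a combiner, pre-aggregating map output before the shuffle. That is an optional tweak not present in the original code; it would be one extra line in the driver below:

job.setCombinerClass(WordCountReduce.class); // local map-side aggregation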

Main

package com.zln.chapter03;

import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

/**
 * Created by sherry on 15-7-12.
 */
public class WordCount extends Configured implements Tool {

    public int run(String[] args) throws Exception {
        Job job = new Job(getConf());
        job.setJarByClass(WordCount.class);
        job.setJobName("WordCount");

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        job.setMapperClass(WordCountMap.class);
        job.setReducerClass(WordCountReduce.class);

        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        boolean success = job.waitForCompletion(true);
        return success ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        int ret = ToolRunner.run(new WordCount(), args);
        System.exit(ret);
    }
}
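A practical payoff of going through ToolRunner is that GenericOptionsParser consumes -D style flags before run() sees the remaining arguments, so configuration can be overridden at submit time without recompiling; for example (the reduce count here is illustrative):

hadoop jar WordCount.jar -D mapred.reduce.tasks=2 /user/root/zln/WordCount/InputFiles /user/root/zln/WordCount/OutputFiles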
