mr微博内容推荐
第一次迭代
1 package com.laoxiao.mr.weibo; import java.io.StringReader; import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.wltea.analyzer.core.IKSegmenter;
import org.wltea.analyzer.core.Lexeme; /**
* 第一个MR,计算TF和计算N(微博总数)
* @author root
*
*/
public class firstMapper extends Mapper<LongWritable, Text, Text, IntWritable>{ protected void map(LongWritable key, Text value, Context context)
throws java.io.IOException ,InterruptedException {
String [] temp=StringUtils.split(value.toString(),"\t");
if(temp.length>=2){
String id=temp[0].trim();
String str=temp[1].trim();
StringReader sr =new StringReader(str);
IKSegmenter ikSegmenter =new IKSegmenter(sr, true);
Lexeme word=null;
while( (word=ikSegmenter.next()) !=null ){
String w= word.getLexemeText();
context.write(new Text(w+"_"+id), new IntWritable(1));
}
context.write(new Text("count"), new IntWritable(1));
}else{
System.out.println("value is error:"+value.toString());
}
};
}
package com.laoxiao.mr.weibo;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

/**
 * First job's reducer: sums the 1-counts for each key.
 * For "word_weiboId" keys the sum is the TF of that word in that weibo;
 * for the special "count" key the sum is N, the total number of weibos.
 */
// NOTE: the accidental IDE auto-import of the JDK-internal
// sun.management.resources.agent was removed — it was unused and makes the
// build non-portable across JDKs.
public class firstReducer extends Reducer<Text, IntWritable, Text, IntWritable>{

	@Override
	protected void reduce(Text key, java.lang.Iterable<IntWritable> values, Context context)
			throws java.io.IOException, InterruptedException {
		int sum = 0;
		for (IntWritable i : values) {
			sum += i.get();
		}
		context.write(key, new IntWritable(sum));
	}
}
package com.laoxiao.mr.weibo;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.lib.partition.HashPartitioner;

/**
 * Partitioner for the first job.
 * Routes the special "count" key (corpus total N) to the LAST reducer so it
 * lands alone in its own part file; all other keys are hash-partitioned over
 * the remaining numReduceTasks-1 reducers.
 */
public class firstRepartition extends HashPartitioner<Text, IntWritable>{

	@Override
	public int getPartition(Text key, IntWritable value, int numReduceTasks) {
		if (key.toString().equals("count")) {
			// Last partition instead of a hard-coded 3: equals 3 for the
			// current 4-reducer job but stays correct for any reducer count >= 2.
			return numReduceTasks - 1;
		} else {
			return super.getPartition(key, value, numReduceTasks - 1);
		}
	}
}
package com.laoxiao.mr.weibo;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * Driver for iteration 1: computes TF per (word, weibo) pair and the corpus
 * size N. Reads /root/input/data/weibo.txt, writes /usr/output/weibo1.
 * Reducers 0-2 hold TF records; reducer 3 holds only the "count" total
 * (see firstRepartition).
 */
public class firstJob {

	public static void main(String[] args) {
		Configuration config = new Configuration();
		config.set("fs.defaultFS", "hdfs://node1:8020");
		config.set("yarn.resourcemanager.hostname", "node1");
		try {
			FileSystem fs = FileSystem.get(config);
			Job job = Job.getInstance(config);
			job.setJarByClass(firstJob.class);
			job.setJobName("weibo1");
			job.setMapperClass(firstMapper.class);
			job.setReducerClass(firstReducer.class);
			job.setMapOutputKeyClass(Text.class);
			job.setMapOutputValueClass(IntWritable.class);
			job.setPartitionerClass(firstRepartition.class);
			// 4 reducers: the partitioner reserves the last one for "count".
			job.setNumReduceTasks(4);
			FileInputFormat.addInputPath(job, new Path("/root/input/data/weibo.txt"));
			Path path = new Path("/usr/output/weibo1");
			// Hadoop refuses to overwrite an existing output dir; clear stale results.
			if (fs.exists(path)) {
				fs.delete(path, true);
			}
			FileOutputFormat.setOutputPath(job, path);
			boolean f = job.waitForCompletion(true);
			if (f) {
				System.out.println("first job run finished!!");
			}
		} catch (Exception e) {
			e.printStackTrace();
		}
	}
}
第二次迭代
package com.laoxiao.mr.weibo; import java.io.IOException; import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
//统计df:词在多少个微博中出现过。
public class secondMapper extends Mapper<LongWritable, Text, Text, IntWritable>{ protected void map(LongWritable key, Text value, Context context)
throws IOException, InterruptedException { //获取当前 mapper task的数据片段(split)
FileSplit fs = (FileSplit) context.getInputSplit(); if (!fs.getPath().getName().contains("part-r-00003")) { String[] v = value.toString().trim().split("\t");
if (v.length >= 2) {
String[] ss = v[0].split("_");
if (ss.length >= 2) {
String w = ss[0];
context.write(new Text(w), new IntWritable(1));
}
} else {
System.out.println(value.toString() + "-------------");
}
} }
}
package com.laoxiao.mr.weibo; import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer; public class secondReducer extends Reducer<Text, IntWritable, Text, IntWritable>{ protected void reduce(Text arg0, java.lang.Iterable<IntWritable> arg1, Context context)
throws java.io.IOException ,InterruptedException {
int sum=0;
for (IntWritable i : arg1) {
sum+=1;
}
context.write(arg0, new IntWritable(sum));
};
}
package com.laoxiao.mr.weibo;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * Driver for iteration 2: reads the first job's TF output (/usr/output/weibo1)
 * and writes each word's document frequency (DF) to /usr/output/weibo2.
 * Uses the default single reducer, so the DF table lands in part-r-00000.
 */
public class secondJob {

	public static void main(String[] args) {
		Configuration config = new Configuration();
		config.set("fs.defaultFS", "hdfs://node1:8020");
		config.set("yarn.resourcemanager.hostname", "node1");
		try {
			FileSystem fs = FileSystem.get(config);
			Job job = Job.getInstance(config);
			job.setJarByClass(secondJob.class);
			job.setJobName("weibo2");
			job.setMapperClass(secondMapper.class);
			job.setReducerClass(secondReducer.class);
			job.setMapOutputKeyClass(Text.class);
			job.setMapOutputValueClass(IntWritable.class);
			FileInputFormat.addInputPath(job, new Path("/usr/output/weibo1"));
			Path path = new Path("/usr/output/weibo2");
			// Clear any stale output dir; Hadoop will not overwrite it.
			if (fs.exists(path)) {
				fs.delete(path, true);
			}
			FileOutputFormat.setOutputPath(job, path);
			boolean f = job.waitForCompletion(true);
			if (f) {
				System.out.println("second job run finished!!");
			}
		} catch (Exception e) {
			e.printStackTrace();
		}
	}
}
第三次迭代
package com.laoxiao.mr.weibo; import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.StringReader;
import java.net.URI;
import java.text.NumberFormat;
import java.util.HashMap;
import java.util.Map; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.wltea.analyzer.core.IKSegmenter;
import org.wltea.analyzer.core.Lexeme; /**
* 最后计算
* @author root
*
*/
public class LastMapper extends Mapper<LongWritable, Text, Text, Text> {
//存放微博总数
public static Map<String, Integer> cmap = null;
//存放df
public static Map<String, Integer> df = null; // 在map方法执行之前
protected void setup(Context context) throws IOException,
InterruptedException {
System.out.println("******************");
if (cmap == null || cmap.size() == 0 || df == null || df.size() == 0) { URI[] ss = context.getCacheFiles();
if (ss != null) {
for (int i = 0; i < ss.length; i++) {
URI uri = ss[i];
if (uri.getPath().endsWith("part-r-00003")) {//微博总数
Path path =new Path(uri.getPath());
// FileSystem fs =FileSystem.get(context.getConfiguration());
// fs.open(path);
BufferedReader br = new BufferedReader(new FileReader(path.getName()));
String line = br.readLine();
if (line.startsWith("count")) {
String[] ls = line.split("\t");
cmap = new HashMap<String, Integer>();
cmap.put(ls[0], Integer.parseInt(ls[1].trim()));
}
br.close();
} else if (uri.getPath().endsWith("part-r-00000")) {//词条的DF
df = new HashMap<String, Integer>();
Path path =new Path(uri.getPath());
BufferedReader br = new BufferedReader(new FileReader(path.getName()));
String line;
while ((line = br.readLine()) != null) {
String[] ls = line.split("\t");
df.put(ls[0], Integer.parseInt(ls[1].trim()));
}
br.close();
}
}
}
}
} protected void map(LongWritable key, Text value, Context context)
throws IOException, InterruptedException {
FileSplit fs = (FileSplit) context.getInputSplit();
// System.out.println("--------------------");
if (!fs.getPath().getName().contains("part-r-00003")) { String[] v = value.toString().trim().split("\t");
if (v.length >= 2) {
int tf =Integer.parseInt(v[1].trim());//tf值
String[] ss = v[0].split("_");
if (ss.length >= 2) {
String w = ss[0];
String id=ss[1]; double s=tf * Math.log(cmap.get("count")/df.get(w));
NumberFormat nf =NumberFormat.getInstance();
nf.setMaximumFractionDigits(5);
context.write(new Text(id), new Text(w+":"+nf.format(s)));
}
} else {
System.out.println(value.toString() + "-------------");
}
}
}
}
package com.laoxiao.mr.weibo; import java.io.IOException; import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer; public class LastReduce extends Reducer<Text, Text, Text, Text>{ protected void reduce(Text key, Iterable<Text> arg1,
Context context)
throws IOException, InterruptedException { StringBuffer sb =new StringBuffer(); for( Text i :arg1 ){
sb.append(i.toString()+"\t");
} context.write(key, new Text(sb.toString()));
} }
package com.laoxiao.mr.weibo;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * Driver for iteration 3: joins job 1's TF output with the cached corpus
 * total N (weibo1/part-r-00003) and DF table (weibo2/part-r-00000) and
 * writes per-weibo "word:tfidf" lines to /usr/output/weibo3.
 */
public class LastJob {

	public static void main(String[] args) {
		Configuration config = new Configuration();
		config.set("fs.defaultFS", "hdfs://node1:8020");
		config.set("yarn.resourcemanager.hostname", "node1");
		try {
			FileSystem fs = FileSystem.get(config);
			Job job = Job.getInstance(config);
			job.setJarByClass(LastJob.class);
			job.setJobName("weibo3");
			// Ship the small side tables via the distributed cache so every
			// map task can load them into memory in LastMapper.setup().
			job.addCacheFile(new Path("/usr/output/weibo1/part-r-00003").toUri()); // corpus total N
			job.addCacheFile(new Path("/usr/output/weibo2/part-r-00000").toUri()); // per-word DF
			// Map and reduce share the same (Text, Text) output types, so the
			// job-level output classes cover both stages.
			job.setOutputKeyClass(Text.class);
			job.setOutputValueClass(Text.class);
			job.setMapperClass(LastMapper.class);
			job.setReducerClass(LastReduce.class);
			FileInputFormat.addInputPath(job, new Path("/usr/output/weibo1"));
			Path outpath = new Path("/usr/output/weibo3");
			// Clear any stale output dir; Hadoop will not overwrite it.
			if (fs.exists(outpath)) {
				fs.delete(outpath, true);
			}
			FileOutputFormat.setOutputPath(job, outpath);
			boolean f = job.waitForCompletion(true);
			if (f) {
				System.out.println("执行job成功");
			}
		} catch (Exception e) {
			e.printStackTrace();
		}
	}
}
mr微博内容推荐的更多相关文章
- 微博广告推荐中有关Hadoop的那些事
一.背景 微博,一个DAU上亿.每日发博量几千万的社交性产品,拥有庞大的数据集.如何高效得从如此规模的数据集中挖掘出有价值的信息,以增强用户粘性,提高信息传播速度,就成了重中之重.因此,我们引入了ha ...
- 基于scrapy的分布式爬虫抓取新浪微博个人信息和微博内容存入MySQL
为了学习机器学习深度学习和文本挖掘方面的知识,需要获取一定的数据,新浪微博的大量数据可以作为此次研究历程的对象 一.环境准备 python 2.7 scrapy框架的部署(可以查看上一篇博客的简 ...
- 基于KNN的相关内容推荐
如果做网站的内容运营,相关内容推荐可以帮助用户更快地寻找和发现感兴趣的信息,从而提升网站内容浏览的流畅性,进而提升网站的价值转化.相关内容 推荐最常见的两块就是“关联推荐”和“相关内容推荐”,关联推荐 ...
- HBase在搜狐内容推荐引擎系统中的应用
转自:http://www.aboutyun.com/thread-7297-1-1.html Facebook放弃Cassandra之后,对HBase 0.89版本进行了大量稳定性优化,使它真正成为 ...
- 基于Tags的简单内容推荐的实现
原来为了简单方便,自己小网站上的文章页的相关内容推荐就是从数据库里随机抽取数据来填充一个列表,所以一点相关性都没有,更本没有办法引导用户去访问推荐内容. 算法选择 如何能做到相似内容的推荐呢,碍于小网 ...
- django集成微博内容
登录微博 我的工具 OK. 分享sns网站的网址分享道.去上面获取代码就可. 改版后叫微博秀
- Python 爬虫 ajax爬取马云爸爸微博内容
ajax爬取情况 有时候我们在用 Requests 抓取页面的时候,得到的结果可能和在浏览器中看到的是不一样的,在浏览器中可以看到正常显示的页面数据,但是使用 Requests 得到的结果并没有,这其 ...
- 微博推荐算法学习(Weibo Recommend Algolrithm)
原文:http://hijiangtao.github.io/2014/10/06/WeiboRecommendAlgorithm/ 基础及关联算法 作用:为微博推荐挖掘必要的基础资源.解决推荐时的通 ...
- python爬虫实战(六)--------新浪微博(爬取微博帐号所发内容,不爬取历史内容)
相关代码已经修改调试成功----2017-4-13 详情代码请移步我的github:https://github.com/pujinxiao/sina_spider 一.说明 1.目标网址:新浪微博 ...
随机推荐
- gentoo 画框架图,流程图
需要话框架图,流程图的时候,只需要安装 dia 软件就可以了.
- MVC人员管理系统
基本都要使用C控制器中的两个action来完成操作,一个用于从主界面跳转到新页面.同时将所需操作的数据传到新界面,另一个则对应新界面的按钮,用于完成操作.将数据传回主界面以及跳转回主界面.根据不同情况 ...
- Netty从入门到精通到放弃
有时间想记录一下关于Netty的知识. 第一课:传统IO的特点. package com.example.aimei.controller; import java.io.InputStream; i ...
- How to Build a New Habit: This is Your Strategy Guide
How to Build a New Habit: This is Your Strategy Guide by James ClearRead this on JamesClear.com Acco ...
- Jmeter软件介绍
1.软件结构 Apache JMeter是Apache组织开发的基于Java的压力测试工具.用于对软件做压力测试,它最初被设计用于Web应用测试,但后来扩展到其他测试领域. 它可以用于测试静态和动态资 ...
- EF.Mysql在codefirst模式下调用存储过程,和再DbFirst模式下的调用
List<GetUserUpCrmList_Result> r = null; using (var context = new HimallContext()) { var parame ...
- 项目(八) Jenkins持续集成与构建
Jenkins环境搭建 由于Jenkins是依赖于java的,所以先介绍java环境的搭建 1)使用官方的二进制包解压安装,官方二进制包的下载地址:http://www.oracle.com/tech ...
- Codeforces Round #436 B. Polycarp and Letters
题意:给你一串长度为n的字符,由大小写字母组成,求连续的小写子串中不同字母个数的最大值. Input 11aaaaBaabAbA Output 2 Input 12zACaAbbaazzC Outpu ...
- StringUtils.isEmpty StringUtils.isBlank
两个方法都是判断字符是否为空的.前者是要求没有任何字符,即str==null 或 str.length()==0:后者要求是空白字符,即无意义字符.其实isBlank判断的空字符是包括了isEmpty ...
- Excel VBA 连接各种数据库(二) VBA连接Oracle数据库
本文主要内容: Oracle环境配置 ODBC驱动设置.第三方驱动下载 VBA连接Oracle连接方法 Oracle10g官方免账号下载地址 系统环境: Windows 7 64bit Excel 2 ...