Big Data Learning: MapReduce Operator Log Enhancement
Requirements
- 1. Parse the raw JSON data into plain text records (an example is sketched after the data link below).
- 2. For each user, find the 3 movies they rated highest.
- 3. Find the 3 movies that have been rated the most times.
Data
https://pan.baidu.com/s/1gPsQXVYSQEZ2OYek4HxK6A
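Judging from the mapper code further down, each line of rating.json is a small JSON object with the fields movie, rate, timeStamp and uid. The sketch below uses a made-up sample record (the real file is only available from the download link) to show the conversion that requirement 1 asks for: one JSON line in, one tab-separated text line out.

```java
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;

public class ParseOneLine {
    public static void main(String[] args) {
        // Hypothetical sample record, in the same shape the mappers below expect
        String line = "{\"movie\":\"1721\",\"rate\":\"3\",\"timeStamp\":\"965440048\",\"uid\":\"5114\"}";
        JSONObject json = JSON.parseObject(line);
        // Flatten the JSON object into the tab-separated form used by the rest of the pipeline
        String text = json.getLong("movie") + "\t" + json.getLong("rate") + "\t"
                + json.getLong("timeStamp") + "\t" + json.getLong("uid");
        System.out.println(text);   // prints the four fields separated by tabs
    }
}
```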
pom.xml
```xml
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>com.cyf</groupId>
    <artifactId>MapReduceCases</artifactId>
    <packaging>jar</packaging>
    <version>1.0</version>

    <properties>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
        <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>2.6.4</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>2.6.4</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>2.6.4</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-client-core</artifactId>
            <version>2.6.4</version>
        </dependency>
        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>fastjson</artifactId>
            <version>1.1.40</version>
        </dependency>
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <version>5.1.36</version>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <artifactId>maven-assembly-plugin</artifactId>
                <configuration>
                    <appendAssemblyId>false</appendAssemblyId>
                    <descriptorRefs>
                        <descriptorRef>jar-with-dependencies</descriptorRef>
                    </descriptorRefs>
                    <archive>
                        <manifest>
                            <mainClass>cn.itcast.mapreduce.json.JsonToText</mainClass>
                        </manifest>
                    </archive>
                </configuration>
                <executions>
                    <execution>
                        <id>make-assembly</id>
                        <phase>package</phase>
                        <goals>
                            <goal>assembly</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
</project>
```
OriginBean.java

```java
package cn.itcast.mapreduce.json;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.WritableComparable;

/**
 * One rating record: movie id, rating, timestamp and user id.
 * Implements WritableComparable so it can be used as a Hadoop key or value.
 */
public class OriginBean implements WritableComparable<OriginBean> {

    private Long movie;
    private Long rate;
    private Long timeStamp;
    private Long uid;

    public Long getMovie() {
        return movie;
    }

    public void setMovie(Long movie) {
        this.movie = movie;
    }

    public Long getRate() {
        return rate;
    }

    public void setRate(Long rate) {
        this.rate = rate;
    }

    public Long getTimeStamp() {
        return timeStamp;
    }

    public void setTimeStamp(Long timeStamp) {
        this.timeStamp = timeStamp;
    }

    public Long getUid() {
        return uid;
    }

    public void setUid(Long uid) {
        this.uid = uid;
    }

    public OriginBean(Long movie, Long rate, Long timeStamp, Long uid) {
        this.movie = movie;
        this.rate = rate;
        this.timeStamp = timeStamp;
        this.uid = uid;
    }

    // No-arg constructor required by the Hadoop serialization framework
    public OriginBean() {
    }

    public int compareTo(OriginBean o) {
        return this.movie.compareTo(o.movie);
    }

    // Fields must be written and read back in exactly the same order
    public void write(DataOutput out) throws IOException {
        out.writeLong(movie);
        out.writeLong(rate);
        out.writeLong(timeStamp);
        out.writeLong(uid);
    }

    public void readFields(DataInput in) throws IOException {
        this.movie = in.readLong();
        this.rate = in.readLong();
        this.timeStamp = in.readLong();
        this.uid = in.readLong();
    }

    @Override
    public String toString() {
        return this.movie + "\t" + this.rate + "\t" + this.timeStamp + "\t" + this.uid;
    }
}
```
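write() and readFields() above must keep the fields in exactly the same order, because Hadoop serializes and deserializes the bean as a raw byte stream between tasks. A minimal local sketch of that round-trip (the sample values are made up; this is only an illustration, not part of the job):

```java
package cn.itcast.mapreduce.json;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class OriginBeanRoundTrip {
    public static void main(String[] args) throws IOException {
        OriginBean original = new OriginBean(1721L, 3L, 965440048L, 5114L);

        // Serialize the bean the same way Hadoop does when shuffling it
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        original.write(new DataOutputStream(bytes));

        // Read it back into a fresh bean; the fields come out in write() order
        OriginBean restored = new OriginBean();
        restored.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(restored);   // same four tab-separated fields as the original
    }
}
```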
JsonToText.java

```java
package cn.itcast.mapreduce.json;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.codehaus.jackson.map.ObjectMapper;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;

/**
 * Map-only job: parses every JSON line of rating.json and rewrites it as a
 * tab-separated text record (movie, rate, timeStamp, uid).
 */
public class JsonToText {

    static class MyMapper extends Mapper<LongWritable, Text, Text, NullWritable> {

        Text k = new Text();

        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            // Bean bean = mapper.readValue(value.toString(), Bean.class);
            JSONObject valueJson = JSON.parseObject(value.toString());
            Long movie = valueJson.getLong("movie");
            OriginBean bean = new OriginBean(movie, valueJson.getLong("rate"), valueJson.getLong("timeStamp"), valueJson.getLong("uid"));
            k.set(bean.toString());
            context.write(k, NullWritable.get());
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // 16777216 / 1024 / 1024 = 16 MB per split; the roughly 62.5 MB input file
        // therefore becomes 4 splits, so 4 map tasks run and 4 output files are written
        conf.set("mapreduce.input.fileinputformat.split.maxsize", "16777216");

        Job job = Job.getInstance(conf);

        // job.setJarByClass(JsonToText.class);
        // Tell the framework where the job's jar lives
        job.setJar("/root/JsonToText.jar");

        job.setMapperClass(MyMapper.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);
        // job.setOutputFormatClass(SequenceFileOutputFormat.class);

        // Map-only job: no reduce phase
        job.setNumReduceTasks(0);

        FileInputFormat.setInputPaths(job, new Path("/json/input"));
        FileOutputFormat.setOutputPath(job, new Path("/json/output"));
        // FileInputFormat.setInputPaths(job, new Path(args[0]));
        // FileOutputFormat.setOutputPath(job, new Path(args[1]));

        job.waitForCompletion(true);
    }
}
```
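The "4 splits" figure in the comment is just the input size divided by the configured maximum split size, rounded up. A tiny sketch of that arithmetic (the 62.5 MB figure comes from the original comment; the exact byte count of rating.json may differ):

```java
public class SplitCountEstimate {
    public static void main(String[] args) {
        long fileSize = 65536000L;       // ~62.5 MB, the approximate size of rating.json
        long maxSplitSize = 16777216L;   // 16 MB, the configured split.maxsize
        long splits = (long) Math.ceil(fileSize / (double) maxSplitSize);
        System.out.println(splits);      // 4 -> 4 map tasks and 4 output files
    }
}
```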
Create the input directory and upload the data

```
hadoop fs -mkdir -p /json/input
hadoop fs -put rating.json /json/input
```

Run the job

```
hadoop jar JsonToText.jar cn.itcast.mapreduce.json.JsonToText
```

Output
https://pan.baidu.com/s/1ayrpl7w8Dlzpc7TRZIO94w
pom.xml (identical to the one above except that the assembly plugin's mainClass now points to cn.itcast.mapreduce.json.MovieRateSum)
```xml
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>com.cyf</groupId>
    <artifactId>MapReduceCases</artifactId>
    <packaging>jar</packaging>
    <version>1.0</version>

    <properties>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
        <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>2.6.4</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>2.6.4</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>2.6.4</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-client-core</artifactId>
            <version>2.6.4</version>
        </dependency>
        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>fastjson</artifactId>
            <version>1.1.40</version>
        </dependency>
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <version>5.1.36</version>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <artifactId>maven-assembly-plugin</artifactId>
                <configuration>
                    <appendAssemblyId>false</appendAssemblyId>
                    <descriptorRefs>
                        <descriptorRef>jar-with-dependencies</descriptorRef>
                    </descriptorRefs>
                    <archive>
                        <manifest>
                            <mainClass>cn.itcast.mapreduce.json.MovieRateSum</mainClass>
                        </manifest>
                    </archive>
                </configuration>
                <executions>
                    <execution>
                        <id>make-assembly</id>
                        <phase>package</phase>
                        <goals>
                            <goal>assembly</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
</project>
```
ResultBean.java

```java
package cn.itcast.mapreduce.json;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.WritableComparable;

/**
 * Aggregated result: a movie id together with the sum of all its ratings.
 */
public class ResultBean implements WritableComparable<ResultBean> {

    private Long movie;
    private Long sumRate;

    public void setSumRate(long sumRate) {
        this.sumRate = sumRate;
    }

    public Long getMovie() {
        return movie;
    }

    public void setMovie(Long movie) {
        this.movie = movie;
    }

    public ResultBean(Long movie, Long sumRate) {
        this.movie = movie;
        this.sumRate = sumRate;
    }

    // No-arg constructor required by the Hadoop serialization framework
    public ResultBean() {
    }

    public ResultBean(Long sumRate) {
        super();
        this.sumRate = sumRate;
    }

    // Order by movie id first; for the same movie, the higher sumRate sorts first
    public int compareTo(ResultBean o) {
        if (this.movie - o.movie != 0) {
            return (int) (this.movie - o.movie);
        }
        return (int) (o.sumRate - this.sumRate);
    }

    public void write(DataOutput out) throws IOException {
        out.writeLong(movie);
        out.writeLong(sumRate);
    }

    public void readFields(DataInput in) throws IOException {
        this.movie = in.readLong();
        this.sumRate = in.readLong();
    }

    @Override
    public String toString() {
        return movie + "\t" + sumRate;
    }
}
```
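One caveat about compareTo above: casting a long difference to int can overflow and flip the sign when the operands are far apart. A safer drop-in variant is sketched below; it is not part of the original code, and since it touches the private fields it would have to replace the method inside ResultBean itself.

```java
// Hypothetical overflow-safe replacement for ResultBean.compareTo
public int compareTo(ResultBean o) {
    int byMovie = Long.compare(this.movie, o.movie);   // ascending by movie id
    if (byMovie != 0) {
        return byMovie;
    }
    return Long.compare(o.sumRate, this.sumRate);      // descending by total rating
}
```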
MovieRateSum.java

```java
package cn.itcast.mapreduce.json;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.codehaus.jackson.map.ObjectMapper;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;

/**
 * Sums the ratings of each movie: the mapper keys every record by movie id,
 * the reducer adds the ratings up and writes one ResultBean per movie.
 */
public class MovieRateSum {

    static class MyMapper extends Mapper<LongWritable, Text, LongWritable, OriginBean> {

        ObjectMapper mapper = new ObjectMapper();

        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            // Bean bean = mapper.readValue(value.toString(), Bean.class);
            JSONObject valueJson = JSON.parseObject(value.toString());
            Long movie = valueJson.getLong("movie");
            OriginBean bean = new OriginBean(movie, valueJson.getLong("rate"), valueJson.getLong("timeStamp"), valueJson.getLong("uid"));
            context.write(new LongWritable(bean.getMovie()), bean);
        }
    }

    static class MyReduce extends Reducer<LongWritable, OriginBean, ResultBean, NullWritable> {

        @Override
        protected void reduce(LongWritable movie, Iterable<OriginBean> beans, Context context) throws IOException, InterruptedException {
            // Add up every rating this movie received
            long sum = 0L;
            for (OriginBean bean : beans) {
                sum += bean.getRate();
            }
            ResultBean bean = new ResultBean();
            bean.setMovie(movie.get());
            bean.setSumRate(sum);
            context.write(bean, NullWritable.get());
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);

        // job.setJarByClass(MovieRateSum.class);
        // Tell the framework where the job's jar lives
        job.setJar("/root/MovieRateSum.jar");

        job.setMapperClass(MyMapper.class);
        job.setReducerClass(MyReduce.class);
        job.setMapOutputKeyClass(LongWritable.class);
        job.setMapOutputValueClass(OriginBean.class);
        job.setOutputKeyClass(ResultBean.class);
        job.setOutputValueClass(NullWritable.class);
        job.setOutputFormatClass(SequenceFileOutputFormat.class);

        FileInputFormat.setInputPaths(job, new Path("/json/output"));
        FileOutputFormat.setOutputPath(job, new Path("/json/output-seq"));
        // FileInputFormat.setInputPaths(job, new Path(args[0]));
        // FileOutputFormat.setOutputPath(job, new Path(args[1]));

        job.waitForCompletion(true);
    }
}
```
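One thing to watch out for when running MovieRateSum as written: its input path is /json/output, which holds the tab-separated text produced by JsonToText, yet the mapper still parses each line as JSON. Either point the input back at /json/input, or use a mapper along the lines of the hedged sketch below (not the original author's code) that splits the tab-separated records instead.

```java
// Hypothetical replacement for MovieRateSum.MyMapper when the input is the
// tab-separated output of JsonToText (movie, rate, timeStamp, uid)
static class TextMapper extends Mapper<LongWritable, Text, LongWritable, OriginBean> {

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        String[] fields = value.toString().split("\t");
        OriginBean bean = new OriginBean(Long.parseLong(fields[0]), Long.parseLong(fields[1]),
                Long.parseLong(fields[2]), Long.parseLong(fields[3]));
        context.write(new LongWritable(bean.getMovie()), bean);
    }
}
```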