1、Driver

package com.kangaroo.hadoop.drive;

import java.util.Map;
import java.util.Properties;

import com.kangaroo.hadoop.mapper.AggregateMapper;
import com.kangaroo.hadoop.reducer.AggregateReducer;
import com.kangaroo.hadoop.utils.PropertiesUtil;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hive.hcatalog.mapreduce.HCatInputFormat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class DriveMain extends Configured implements Tool {

    private static final Logger logger = LoggerFactory.getLogger(DriveMain.class);

    private Configuration conf;
    private PropertiesUtil propUtil;

    public DriveMain() {
        this.conf = new Configuration();
        this.propUtil = new PropertiesUtil("configure.properties");
    }

    @Override
    public int run(String[] args) throws Exception {
        try {
            logger.info("MapReduce Job Beginning.");
            String dbName = args[0];
            String tableName = args[1];
            String partition = args[2];
            String sumField = args[3];
            String outPath = args[4];
            String partFilter = partitionFormat(partition);
            logger.info("[Params] dbName:{}, tableName:{}, partition:{}, sumField:{}, outPath:{}, partFilter:{}",
                    dbName, tableName, partition, sumField, outPath, partFilter);
            this.conf.set("sumField", sumField);
            this.setMapRedConfiguration();
            Job job = this.setJobConfiguration(this.conf);
            // Read the Hive table through HCatalog, restricted by the partition filter.
            HCatInputFormat.setInput(job, dbName, tableName, partFilter);
            logger.info("setInput successfully.");
            FileOutputFormat.setOutputPath(job, new Path(outPath));
            logger.info("setOutput successfully.");
            return job.waitForCompletion(true) ? 0 : 1;
        } catch (Exception ex) {
            logger.error(ex.getMessage());
            throw ex;
        }
    }

    private Job setJobConfiguration(Configuration conf) throws Exception {
        try {
            logger.info("enter setJobConfiguration");
            Job job = Job.getInstance(conf);
            job.setJarByClass(DriveMain.class);
            job.setInputFormatClass(HCatInputFormat.class);
            job.setMapperClass(AggregateMapper.class);
            job.setReducerClass(AggregateReducer.class);
            job.setMapOutputKeyClass(Text.class);
            job.setMapOutputValueClass(Text.class);
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(Text.class);
            // A single reducer collects all partial counts.
            job.setNumReduceTasks(1);
            logger.info("setJobConfiguration successfully.");
            return job;
        } catch (Exception ex) {
            logger.error("setJobConfiguration: " + ex.getMessage());
            throw new Exception(ex);
        }
    }

    private void setMapRedConfiguration() {
        try {
            Properties properties = propUtil.getProperties();
            logger.info("Load MapReduce Configuration Successfully.");
            // Only keys starting with "mapred" are copied into the job configuration.
            for (Map.Entry<Object, Object> entry : properties.entrySet()) {
                if (entry.getKey().toString().startsWith("mapred")) {
                    conf.set(entry.getKey().toString(), entry.getValue().toString());
                    logger.info("[MR][Config] key:{}, value:{}", entry.getKey().toString(), entry.getValue().toString());
                }
            }
            logger.info("[MR][Config] Set MapReduce Configuration Successfully.");
        } catch (Exception e) {
            // Do not fail the job if the optional properties file cannot be read; just log it.
            logger.warn("setMapRedConfiguration: " + e.getMessage());
        }
    }

    /**
     * Turns a path-style partition such as "2017/06/01/00" into an HCatalog filter expression,
     * e.g. "year='2017' and month='06' and day='01' and hour='00'".
     * If the partition already uses pt/dt keys, it is passed through unchanged.
     */
    private String partitionFormat(String partition) {
        String format = "";
        if (!partition.contains("pt") && !partition.contains("dt")) {
            String[] items = partition.split("/");
            String[] keys = {"year", "month", "day", "hour"};
            for (int i = 0; i < items.length; i++) {
                if (i == items.length - 1) {
                    format += keys[i] + "='" + items[i] + "'";
                } else {
                    format += keys[i] + "='" + items[i] + "' and ";
                }
            }
        } else {
            format = partition;
        }
        return format;
    }

    public static void main(String[] args) throws Exception {
        int exitCode = ToolRunner.run(new DriveMain(), args);
        System.exit(exitCode);
    }
}
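To make the five positional arguments concrete, here is a minimal submission sketch. All values below (database, table, field, output path, jar name) are hypothetical placeholders, not from the original post; the point is only to show how partitionFormat rewrites a path-style partition into the filter handed to HCatInputFormat.setInput.

// Hypothetical example values for dbName, tableName, partition, sumField and outPath.
public class DriveMainUsageSketch {
    public static void main(String[] args) throws Exception {
        // partition "2017/06/01/00" becomes the filter:
        //   year='2017' and month='06' and day='01' and hour='00'
        String[] jobArgs = {"test_db", "test_table", "2017/06/01/00", "city_id", "/tmp/hcat_agg_out"};
        // Equivalent to submitting with: hadoop jar XXX.jar com.kangaroo.hadoop.drive.DriveMain <args>
        System.exit(org.apache.hadoop.util.ToolRunner.run(
                new com.kangaroo.hadoop.drive.DriveMain(), jobArgs));
    }
}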

2、Mapper

package com.kangaroo.hadoop.mapper;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hive.hcatalog.data.HCatRecord;
import org.apache.hive.hcatalog.data.schema.HCatSchema;
import org.apache.hive.hcatalog.mapreduce.HCatInputFormat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

@SuppressWarnings("rawtypes")
public class AggregateMapper extends Mapper<WritableComparable, HCatRecord, Text, Text> {

    private static final Logger logger = LoggerFactory.getLogger(AggregateMapper.class);

    private HCatSchema schema;
    private Text outKey;
    private Text outValue;

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        outKey = new Text();
        outValue = new Text();
        // The Hive table schema is made available to every task through the job configuration.
        schema = HCatInputFormat.getTableSchema(context.getConfiguration());
    }

    @Override
    protected void map(WritableComparable key, HCatRecord value, Context context) throws IOException, InterruptedException {
        String sumField = context.getConfiguration().get("sumField");
        Map<String, String> recordMap = new HashMap<String, String>();
        for (String fieldName : schema.getFieldNames()) {
            String fieldValue = value.get(fieldName, schema).toString();
            logger.info("fieldName={}, fieldValue={}", fieldName, fieldValue);
            recordMap.put(fieldName, fieldValue);
        }
        outKey.set(recordMap.get(sumField));
        outValue.set("1");
    }

    @Override
    protected void cleanup(Context context) throws IOException, InterruptedException {
        // Emits only the key/value pair set by the last record this mapper processed.
        context.write(outKey, outValue);
    }
}

3、Reducer

package com.kangaroo.hadoop.reducer;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hive.hcatalog.data.schema.HCatSchema;
import org.apache.hive.hcatalog.mapreduce.HCatInputFormat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;

@SuppressWarnings("rawtypes")
public class AggregateReducer extends Reducer<Text, Text, Text, Text> {

    protected static final Logger logger = LoggerFactory.getLogger(AggregateReducer.class);

    HCatSchema schema;
    Text outKey;
    Text outValue;

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        // Initialize the output objects here, otherwise reduce() would hit a NullPointerException.
        outKey = new Text();
        outValue = new Text();
        schema = HCatInputFormat.getTableSchema(context.getConfiguration());
    }

    @Override
    public void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
        outKey.set(key);
        int sum = 0;
        for (Text value : values) {
            sum += Integer.parseInt(value.toString());
        }
        outValue.set(String.valueOf(sum));
    }

    @Override
    protected void cleanup(Context context) throws IOException, InterruptedException {
        // Writes only the result of the last key this reducer saw; the driver runs a single reducer.
        context.write(outKey, outValue);
    }
}

4、PropertiesUtil

package com.kangaroo.hadoop.utils;

import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;

public class PropertiesUtil {

    private String filePath;

    public PropertiesUtil() {
        this.filePath = "configure.properties";
    }

    public PropertiesUtil(String filePath) {
        this.filePath = filePath;
    }

    public Properties getProperties() throws IOException {
        Properties prop;
        InputStream inStream = null;
        try {
            // The properties file is resolved from the classpath.
            inStream = PropertiesUtil.class.getClassLoader()
                    .getResourceAsStream(this.filePath);
            prop = new Properties();
            prop.load(inStream);
            return prop;
        } finally {
            if (inStream != null) {
                inStream.close();
            }
        }
    }
}
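A minimal usage sketch of PropertiesUtil, assuming configure.properties (section 5 below) is on the classpath. It mirrors what the driver's setMapRedConfiguration does: only keys that start with "mapred" are copied into a Hadoop Configuration.

import java.util.Map;
import java.util.Properties;
import org.apache.hadoop.conf.Configuration;
import com.kangaroo.hadoop.utils.PropertiesUtil;

public class PropertiesUtilUsageSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new PropertiesUtil("configure.properties").getProperties();
        Configuration conf = new Configuration();
        for (Map.Entry<Object, Object> entry : props.entrySet()) {
            if (entry.getKey().toString().startsWith("mapred")) {
                conf.set(entry.getKey().toString(), entry.getValue().toString());
            }
        }
        System.out.println("queue = " + conf.get("mapred.job.queue.name"));
    }
}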

5、Configuration (configure.properties)

mapred.job.queue.name=root.XXX
mapred.jar=./XXX.jar
mapred.map.tasks=300
mapred.reduce.tasks=100
#mapred.map.capacity=1
#mapred.reduce.capacity=1
mapred.job.priority=HIGH
mapred.job.name=XXX
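These are the older mapred.* property names, which Hadoop 2.x still honors through its deprecation mapping; the driver copies them verbatim into the job configuration. A small verification sketch (the queue name and priority are the placeholders from the file above), showing that the values are visible on the Job at submission time:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class MapRedConfigCheckSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("mapred.job.queue.name", "root.XXX");  // placeholder queue name
        conf.set("mapred.job.priority", "HIGH");
        Job job = Job.getInstance(conf);
        // The keys set above can be read back from the job's configuration.
        System.out.println(job.getConfiguration().get("mapred.job.queue.name"));
        System.out.println(job.getConfiguration().get("mapred.job.priority"));
    }
}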
