create table hive_1(id string,name string ,gender string)
row format delimited fields terminated by ','
stored as TEXTFILE;

load data local inpath '/luozt/hive_001.txt' into table hive_1 ;

create EXTERNAL table hive_2(id string,name string ,gender string)
row format delimited fields terminated by ','
stored as TEXTFILE;

load data inpath '/luo/hive_001.txt' into table hive_2 ;
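
// note: LOAD DATA LOCAL copies the file from the local filesystem, while LOAD DATA (no LOCAL) moves it within HDFS;
// dropping an EXTERNAL table removes only the metadata, the data files stay in place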
// count the rows
select count(*) from hive_1;
// empty the table
truncate table log_struct;

// drop the table
drop table log_struct;

create table partition_table
(name string ,salary float,gender string,level string)
partitioned by(dt string,dept string)
row format delimited fields terminated by ','
stored as TEXTFILE;

desc partition_table;

show partitions partition_table; // show the partitions of a partitioned table
// load data into a specific partition
load data local inpath '/luozt/par.txt' into table partition_table partition(dt='2014-04-01',dept='yonyu');
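// note: the partition columns (dt, dept) are not stored in the data file; their values come from the PARTITION clause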

Add a partition:
alter table partition_table add partition(dt='2014-04-03',dept='yonyou3') location '/user/hive/warehouse/luo.db/partition_table/dt=2014-04-03/dept=yonyou3';

Drop a partition:
alter table partition_table drop partition(dt='2014-04-03',dept='yonyou3');

select * from partition_table where salary>7600;
// nested query (FROM-first syntax)
from (select name,salary from partition_table) e select e.name,e.salary where e.salary>7600;

// using IN

select * from partition_table where salary in(7000,6700);

// using CASE
select name,salary,
case
when salary<6800 then 'L1'
when salary>=6800 and salary<8000 then 'L2'
when salary>=8000 then 'L3'
else 'L0'
end as salary_level
from partition_table;

// using GROUP BY and HAVING

select gender,sum(salary) from partition_table group by gender;
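
// HAVING filters groups after aggregation; a minimal sketch (the 7600 threshold is illustrative):
select gender,sum(salary) from partition_table group by gender having sum(salary)>7600;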

// join practice
create table group1 (user string,score int)
row format delimited fields terminated by ','
stored as TEXTFILE;

// the table to join against
create table group_join (user string,class string)
row format delimited fields terminated by ','
stored as TEXTFILE;

// a plain join
select b.class,a.score from group1 a join group_join b on (a.user=b.user);
// when one table is small, use a map-side join hint: MAPJOIN(b), where b is the small table
select /*+MAPJOIN(b)*/ b.class,a.score from group1 a join group_join b on (a.user=b.user);
//left Semi join
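// a minimal example: returns group1 rows that have a match in group_join; only left-table columns may be selected
select a.user,a.score from group1 a left semi join group_join b on (a.user=b.user);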

// grouping
select user,sum(score) from group1 group by user; -- every non-aggregated column in SELECT must appear in GROUP BY
// optimization: enable map-side (partial) aggregation
set hive.map.aggr=true;

//order by
create table orderby_test (user string,class string,math int,english int)
row format delimited fields terminated by ','
stored as TEXTFILE;

// ascending by default
select * from orderby_test order by math; -- add DESC for descending; under set hive.mapred.mode=strict; ORDER BY requires a LIMIT clause

// SORT BY is not affected by set hive.mapred.mode=strict. You can set mapred.reduce.tasks=<number>; SORT BY only sorts within each reducer, so each reducer's output is ordered, which improves the efficiency of a later global sort.
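// a minimal sketch (the reducer count 3 is illustrative):
set mapred.reduce.tasks=3;
select * from orderby_test sort by math;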

// UNION ALL: Hive does not support a top-level UNION; wrap it in a subquery and give the subquery output an alias
select * from (select count(*) as cnt from group1 union all select count(*) as cnt from orderby_test) temp;

// remember this pattern: tag each row with its source table before UNION ALL
select name,height,mark
from
(select name,height,'0' as mark from a
union all
select name,height,'1' as mark from b) t;

// indexes
create table index_test(id int,name string) partitioned by (dt string) row format delimited fields terminated by ',' stored as TEXTFILE;
// first create a staging table
create table temp(id int,name string ,dt string ) row format delimited fields terminated by ',' stored as TEXTFILE;
// enable dynamic partitioning
set hive.exec.dynamic.partition.mode=nonstrict;
set hive.exec.dynamic.partition=true;

insert overwrite table index_test partition(dt) select id,name,dt from temp;
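// note: the dynamic partition column (dt here) must come last in the SELECT list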

// create the index (in these notes, the table had to be partitioned for index creation to work)
create index index1 on table index_test(id) AS 'org.apache.hadoop.hive.ql.index.compact.CompactIndexHandler' with DEFERRED REBUILD;

alter index index1 on index_test rebuild;

show index on index_test;
show partitions index_test;
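// note: the index feature was removed in Hive 3.0; columnar formats such as ORC are the usual replacement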

// buckets: cluster rows by a column's hash; used here for sampling (TABLESAMPLE)

create table tb_tmp(id int,age int,name string,timeflag bigint) row format delimited fields terminated by ',' ;
create table tb_stu(id int,age int,name string,timeflag bigint) clustered by(id) sorted by (age) into 5 buckets row format delimited fields terminated by ',';
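// in older Hive versions, enable this before inserting so rows are actually hashed into buckets:
set hive.enforce.bucketing=true;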

insert into table tb_stu select id,age,name,timeflag from tb_tmp;

Query using the buckets:
select * from tb_stu tablesample(bucket 1 out of 5 on id);
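// the OUT OF value need not equal the bucket count: with 5 buckets, OUT OF 10 samples roughly half of one bucket
select * from tb_stu tablesample(bucket 1 out of 10 on id);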

// storage formats and complex data types
// an RCFile table cannot be loaded with LOAD DATA directly; stage the rows in a TEXTFILE table and INSERT them
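// a minimal sketch (rc_test is a hypothetical table name; temp is the staging table created earlier):
create table rc_test(id int,name string) stored as RCFILE;
insert overwrite table rc_test select id,name from temp;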

//array
create table log_array(ip string,uid array<bigint>) partitioned by (dt string) row format delimited fields terminated by ',' collection items terminated by '|' stored AS TEXTFILE;

load data local inpath '/luo/log_array.txt' into table log_array partition(dt=20150902);
// access values inside the array
select uid[1] from log_array;
select ip,size(uid) from log_array where dt=20150902;
select ip from log_array where dt=20150902 and array_contains(uid,4732974);

//map
create table log_map(ts string,ip string,type string,logtype string,request Map<string,string>,response Map<string,string>)
row format delimited fields terminated by '#' collection items terminated by '&' Map keys terminated by '=' stored as TEXTFILE;
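// a hypothetical input row matching these delimiters ('#' between fields, '&' between map entries, '=' between key and value):
// 2015-09-02 10:00:00#192.168.1.1#web#access#src=wap&ref=home#code=200&msg=ok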

// look up a map value by key
select request['src'] from log_map;

//struct
create table log_struct(ip string,user struct<name:string,age:int>)
row format delimited fields terminated by ','
collection items terminated by '#'
stored as TEXTFILE;

Sample data: 192.168.1.1,wow#23
192.168.1.1,wow#23
192.168.1.1,wow#23
192.168.1.1,wow#23
192.168.1.1,wow#23
192.168.1.1,wow#23

select user.name from log_struct;
