spark sql01
package sql;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.DataFrame;
import org.apache.spark.sql.SQLContext;

/**
 * Reads a JSON file into a DataFrame and demonstrates basic DataFrame operations.
 */
public class DataFrameReadJsonOps2 {

    /**
     * @param args
     */
    public static void main(String[] args) {
        // Create a SparkConf that reads the system configuration and sets the application name
        SparkConf conf = new SparkConf().setAppName("DataFrameOps").setMaster("local");
        // Create the JavaSparkContext instance, the core entry point of the whole Driver
        JavaSparkContext sc = new JavaSparkContext(conf);
        // Set the log level to WARN
        sc.setLogLevel("WARN");
        // Create the SQLContext used for SQL analysis
        SQLContext sqlContext = new SQLContext(sc);
        // Create the DataFrame; a DataFrame can loosely be thought of as a table
        DataFrame df = sqlContext.read().json("c:/resources/people.json");
        // select * from table
        df.show();
        // desc table
        df.printSchema();
        // select name from table
        df.select(df.col("name")).show();
        // select name, age+10 from table
        df.select(df.col("name"), df.col("age").plus(10)).show();
        // select * from table where age > 21
        df.filter(df.col("age").gt(21)).show();
        // select age, count(1) from table group by age
        df.groupBy("age").count().show(); // equivalently: df.groupBy(df.col("age")).count().show();
    }
}
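For reference, c:/resources/people.json is presumably the standard Spark example file, with one JSON object per line; judging from the output below, its contents would be:

{"name":"Michael"}
{"name":"Andy", "age":30}
{"name":"Justin", "age":19}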
// Console output of the run (setLogLevel("WARN") takes effect only after the SparkContext is created, so the startup INFO messages still appear):
//SLF4J: Class path contains multiple SLF4J bindings.
//SLF4J: Found binding in [jar:file:/E:/bigdata/spark-1.4.0-bin-hadoop2.6/lib/spark-assembly-1.4.0-hadoop2.6.0.jar!/org/slf4j/impl/StaticLoggerBinder.class]
//SLF4J: Found binding in [jar:file:/E:/bigdata/spark-1.4.0-bin-hadoop2.6/lib/spark-examples-1.4.0-hadoop2.6.0.jar!/org/slf4j/impl/StaticLoggerBinder.class]
//SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
//SLF4J: Actual binding is of type [org.slf4j.impl.Log4jLoggerFactory]
//Using Spark's default log4j profile: org/apache/spark/log4j-defaults.properties
//17/12/29 14:15:10 INFO SparkContext: Running Spark version 1.4.0
//17/12/29 14:15:24 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
//17/12/29 14:15:28 INFO SecurityManager: Changing view acls to: alamps
//17/12/29 14:15:28 INFO SecurityManager: Changing modify acls to: alamps
//17/12/29 14:15:28 INFO SecurityManager: SecurityManager: authentication disabled; ui acls disabled; users with view permissions: Set(alamps); users with modify permissions: Set(alamps)
//17/12/29 14:15:37 INFO Slf4jLogger: Slf4jLogger started
//17/12/29 14:15:39 INFO Remoting: Starting remoting
//17/12/29 14:15:44 INFO Remoting: Remoting started; listening on addresses :[akka.tcp://sparkDriver@172.18.3.7:55458]
//17/12/29 14:15:44 INFO Utils: Successfully started service 'sparkDriver' on port 55458.
//17/12/29 14:15:45 INFO SparkEnv: Registering MapOutputTracker
//17/12/29 14:15:46 INFO SparkEnv: Registering BlockManagerMaster
//17/12/29 14:15:46 INFO DiskBlockManager: Created local directory at C:\Users\alamps\AppData\Local\Temp\spark-cd3ecbc3-41b5-4d8b-8e78-8c2c368ce80b\blockmgr-660894dd-39d3-4c8a-bf25-ae1d3850953d
//17/12/29 14:15:46 INFO MemoryStore: MemoryStore started with capacity 467.6 MB
//17/12/29 14:15:47 INFO HttpFileServer: HTTP File server directory is C:\Users\alamps\AppData\Local\Temp\spark-cd3ecbc3-41b5-4d8b-8e78-8c2c368ce80b\httpd-106ce90e-d496-4e96-a383-b471aeb5a224
//17/12/29 14:15:47 INFO HttpServer: Starting HTTP Server
//17/12/29 14:15:48 INFO Utils: Successfully started service 'HTTP file server' on port 55464.
//17/12/29 14:15:48 INFO SparkEnv: Registering OutputCommitCoordinator
//17/12/29 14:15:49 INFO Utils: Successfully started service 'SparkUI' on port 4040.
//17/12/29 14:15:49 INFO SparkUI: Started SparkUI at http://172.18.3.7:4040
//17/12/29 14:15:49 INFO Executor: Starting executor ID driver on host localhost
//17/12/29 14:15:50 INFO Utils: Successfully started service 'org.apache.spark.network.netty.NettyBlockTransferService' on port 55483.
//17/12/29 14:15:50 INFO NettyBlockTransferService: Server created on 55483
//17/12/29 14:15:50 INFO BlockManagerMaster: Trying to register BlockManager
//17/12/29 14:15:50 INFO BlockManagerMasterEndpoint: Registering block manager localhost:55483 with 467.6 MB RAM, BlockManagerId(driver, localhost, 55483)
//17/12/29 14:15:50 INFO BlockManagerMaster: Registered BlockManager
//+----+-------+
//| age| name|
//+----+-------+
//|null|Michael|
//| 30| Andy|
//| 19| Justin|
//+----+-------+
//
//root
// |-- age: long (nullable = true)
// |-- name: string (nullable = true)
//
//+-------+
//| name|
//+-------+
//|Michael|
//| Andy|
//| Justin|
//+-------+
//
//+-------+----------+
//| name|(age + 10)|
//+-------+----------+
//|Michael| null|
//| Andy| 40|
//| Justin| 29|
//+-------+----------+
//
//+---+----+
//|age|name|
//+---+----+
//| 30|Andy|
//+---+----+
//
//+----+-----+
//| age|count|
//+----+-----+
//|null| 1|
//| 19| 1|
//| 30| 1|
//+----+-----+
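The comments in the code give the SQL equivalent of each DataFrame call. In Spark 1.4 the same queries can also be run as SQL proper by registering the DataFrame as a temporary table; a minimal sketch (the table name "people" is an arbitrary choice):

// Register the DataFrame as a temporary table so it can be queried with SQL
df.registerTempTable("people");
// Equivalent to df.filter(df.col("age").gt(21)).show()
sqlContext.sql("SELECT * FROM people WHERE age > 21").show();
// Equivalent to df.groupBy("age").count().show()
sqlContext.sql("SELECT age, count(1) AS cnt FROM people GROUP BY age").show();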