scala-spark practice -- DataFrame data visualization, first draft
Finished version: http://www.cnblogs.com/drawwindows/p/5640606.html. First draft:
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.{Logging, SparkConf, SparkContext}
import org.apache.spark.sql.{DataFrame, Row, SaveMode, _}
import com.alibaba.fastjson.{JSON, JSONObject}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import scala.collection.mutable.ArrayBuffer
/**
* For each "dataset" target listed in params, computes table statistics (schema, describe() summary,
* unique/missing counts, quartiles and histograms) and writes the results to HDFS under
* /user/<user>/mlaas/tableStatistic/<tableName>.
*/
object DataFrameVisiualize extends Logging {
def runforstatistic(hiveContext: HiveContext, params: JSONObject) = {
val arr = params.getJSONArray("targetType")
var i = 0
while( i < arr.size()){
val obj = arr.getJSONObject(i)
if("dataset".equalsIgnoreCase(obj.getString("targetType"))){
val tableNameKey = obj.getString("targetName")
val tableName = params.getString(tableNameKey)
val user = params.getString("user")
run(hiveContext, tableName, user)
}
i = i+1
}
}
def run(hiveContext: HiveContext, tableName: String, user: String) = {
val pathParent = s"/user/$user/mlaas/tableStatistic/$tableName"
// val conf = new SparkConf().setAppName("DataFrameVisiualizeJob")
// val sc = new SparkContext(conf)
// val hiveContext = new HiveContext(sc)
// val sqlContext = new SQLContext(sc)
// 0. Get the table's schema information
val schemadf = hiveContext.sql("desc " + tableName)
// Persist the schema information as JSON
val filePathSchema = pathParent + "/schemajson"
schemadf.write.mode(SaveMode.Overwrite).format("json").save(filePathSchema)
// 1. Load the table into a DataFrame
val df = hiveContext.sql("select * from " + tableName)
// 2. Get the DataFrame's describe() summary; by default only numeric columns are included
val dfdesc = df.describe()
// // 3. Persist the describe() output
// val filePath = pathParent + "/describejson"
// des.write.mode(SaveMode.Overwrite).format("json").save(filePath)
// val dfdesc = sqlContext.read.format("json").load(filePath)
// 4. Split the columns into mathColArr (numeric) and strColArr (string)
val mathColArr = dfdesc.columns.filter(!_.equalsIgnoreCase("summary"))
val (colMin, colMax, colMean, colStddev, colMedian) = getDesfromDF(dfdesc, mathColArr)
val allColArr = df.columns
val strColArr = allColArr.filter(!_.equalsIgnoreCase("summary")).diff(mathColArr)
saveRecords(hiveContext, tableName, 100, pathParent + "/recordsjson")
val jsonobj = getAllStatistics(hiveContext, tableName, allColArr, strColArr, mathColArr, 10, colMin, colMax)
jsonobj.put("colMin", colMin)
jsonobj.put("colMax", colMax)
jsonobj.put("colMean", colMean)
jsonobj.put("colStddev", colStddev)
jsonobj.put("colMedian", colMedian)
val jsonStr = jsonobj.toString
val conf1 = new Configuration()
val fs = FileSystem.get(conf1)
val fileName = pathParent + "/jsonObj"
val path = new Path(fileName)
val hdfsOutStream = fs.create(path)
hdfsOutStream.writeBytes(jsonStr)
hdfsOutStream.flush()
hdfsOutStream.close()
// fs.close()
}
def saveRecords(hiveContext: HiveContext, tableName: String, num: Int, filePath: String) : Unit = {
hiveContext.sql(s"select * from $tableName limit $num").write.mode(SaveMode.Overwrite).format("json").save(filePath)
}
/**
* Based on the arrays allColArr, strColArr and mathColArr, builds DataFrames carrying all remaining
* statistics (everything not already obtained from describe()), then iterates over the collected rows
* to fill in each attribute's values.
*/
def getAllStatistics(hiveContext: HiveContext, tableName: String, allColArr: Array[String], strColArr: Array[String], mathColArr: Array[String], partNum: Int, colMin: java.util.HashMap[String, Double], colMax: java.util.HashMap[String, Double]) :
JSONObject = {
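// For illustration only: assuming a hypothetical table `t` with columns `a` and `b`, the query assembled
// below would take roughly this shape (one unique_/missing_ pair per column, plus a total row count):
//   select count(distinct(a)) as unique_a, sum(case when a is null then 1 else 0 end) as missing_a,
//          count(distinct(b)) as unique_b, sum(case when b is null then 1 else 0 end) as missing_b,
//          sum(1) as totalrows from t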
val jsonobj = new JSONObject()
val sb = new StringBuffer()
sb.append("select ")
for(col <- allColArr){
sb.append(s"count(distinct($col)) as unique_$col , sum(case when $col is null then 1 else 0 end) as missing_$col, ")
}
sb.append(s"sum(1) as totalrows from $tableName")
val df = hiveContext.sql(sb.toString)
val colUnique = new java.util.HashMap[String, Long] // distinct-value count per column
val colMissing = new java.util.HashMap[String, Long] // missing (null) count per column
df.take(1).foreach { row =>
jsonobj.put("totalrows", row.getAs[Long]("totalrows"))
allColArr.foreach { col =>
colUnique.put(col, row.getAs[Long]("unique_" + col))
colMissing.put(col, row.getAs[Long]("missing_" + col))
}
}
val dfArr = ArrayBuffer[DataFrame]()
val strHistogramSql = new StringBuffer()
strHistogramSql.append(s"""
SELECT tta.colName, tta.value, tta.num
FROM (
SELECT ta.colName, ta.value, ta.num, ROW_NUMBER() OVER (PARTITION BY ta.colName ORDER BY ta.num DESC) AS row
FROM (
""")
var vergin = 0 // set to 1 once the first per-column SELECT has been appended
for(col <- strColArr){
if(vergin == 1){
strHistogramSql.append(" UNION ALL ")
}
vergin = 1
strHistogramSql.append(s"""
SELECT 'StrHistogram_$col' AS colName, $col AS value, COUNT(1) AS num
FROM $tableName
GROUP BY $col """)
}
strHistogramSql.append(s"""
) ta
) tta
WHERE tta.row <= $partNum
""")
val dfStrHistogram = hiveContext.sql(strHistogramSql.toString)
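// For illustration: with strColArr = Array("a", "b") on a hypothetical table `t`, the query built above is roughly:
//   SELECT tta.colName, tta.value, tta.num
//   FROM (SELECT ta.colName, ta.value, ta.num,
//                ROW_NUMBER() OVER (PARTITION BY ta.colName ORDER BY ta.num DESC) AS row
//         FROM (SELECT 'StrHistogram_a' AS colName, a AS value, COUNT(1) AS num FROM t GROUP BY a
//               UNION ALL
//               SELECT 'StrHistogram_b' AS colName, b AS value, COUNT(1) AS num FROM t GROUP BY b) ta
//        ) tta
//   WHERE tta.row <= partNum
// i.e. it keeps the partNum most frequent values per string column.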
dfArr.append(dfStrHistogram)
for(col <- mathColArr){
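// ntile(4) assigns each row to one of four equally sized buckets ordered by $col; taking max($col)
// per bucket in the query below yields the (approximate) quartile boundaries for the column.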
val df1 = hiveContext.sql(s"select 'Quartile_$col' as colName, ntil, max($col) as num from (select $col, ntile(4) OVER (order by $col)as ntil from $tableName) tt group by ntil ")
log.info("col is :" + col + ", min is :" + colMin.get(col) + ", max is : " + colMax.get(col))
// Call toString first and then toDouble; reading the value directly as a Double throws a ClassCastException
val min = colMin.get(col).toString.toDouble
val max = colMax.get(col).toString.toDouble
val df2 = getHistogramMathDF(col, hiveContext, tableName, min, max, partNum)
dfArr.append(df1)
dfArr.append(df2)
}
val dfAll = dfArr.reduce(_.unionAll(_))
val allRows = dfAll.collect()
val mathColMapQuartile = new java.util.HashMap[String, Array[java.util.HashMap[String,Long]]] // quartiles
val mathColMapHistogram = new java.util.HashMap[String, Array[java.util.HashMap[String,Long]]] // histograms (numeric columns)
val strColMapHistogram = new java.util.HashMap[String, Array[java.util.HashMap[String,Long]]] // histograms (string columns)
val (mathColMapQuartile1, mathColMapHistogram1, strColMapHistogram1) = readRows(allRows)
for(col <- strColArr){
strColMapHistogram.put(col,strColMapHistogram1.get(col).toArray[java.util.HashMap[String,Long]])
}
for(col <- mathColArr){
mathColMapQuartile.put(col,mathColMapQuartile1.get(col).toArray[java.util.HashMap[String,Long]])
mathColMapHistogram.put(col,mathColMapHistogram1.get(col).toArray[java.util.HashMap[String,Long]])
}
jsonobj.put("mathColMapQuartile", mathColMapQuartile)
jsonobj.put("mathColMapHistogram", mathColMapHistogram)
jsonobj.put("strColMapHistogram", strColMapHistogram)
jsonobj.put("colUnique", colUnique)
jsonobj.put("colMissing", colMissing)
jsonobj
}
def readRows(rows: Array[Row]) : (java.util.HashMap[String, ArrayBuffer[java.util.HashMap[String,Long]]] , java.util.HashMap[String, ArrayBuffer[java.util.HashMap[String,Long]]], java.util.HashMap[String, ArrayBuffer[java.util.HashMap[String,Long]]])={
val mathColMapQuartile = new java.util.HashMap[String, ArrayBuffer[java.util.HashMap[String,Long]]] // quartiles
val mathColMapHistogram = new java.util.HashMap[String, ArrayBuffer[java.util.HashMap[String,Long]]] // histograms (numeric columns)
val strColMapHistogram = new java.util.HashMap[String, ArrayBuffer[java.util.HashMap[String,Long]]] // histograms (string columns)
rows.foreach( row => {
val colName = row.getAs[String]("colName")
if (colName.startsWith("StrHistogram")) {
val value = row.getAs[String](1)
val num = row.getAs[Long](2)
val map = new java.util.HashMap[String, Long]()
val col = colName.substring(colName.indexOf('_') + 1)
map.put(value, num)
val mapValue = strColMapHistogram.get(col)
if (mapValue == null) {
val mapValueNew = ArrayBuffer[java.util.HashMap[String, Long]]()
mapValueNew.append(map)
strColMapHistogram.put(col, mapValueNew)
} else {
mapValue.append(map)
strColMapHistogram.put(col, mapValue)
}
} else if (colName.toString.startsWith("Quartile")) {
val value = row.getAs[String](1)
val num = row.getAs[Long](2)
val map = new java.util.HashMap[String, Long]()
val col = colName.substring(colName.indexOf('_') + 1)
map.put(value, num)
val mapValue = mathColMapQuartile.get(col)
if (mapValue == null) {
val mapValueNew = ArrayBuffer[java.util.HashMap[String, Long]]()
mapValueNew.append(map)
mathColMapQuartile.put(col, mapValueNew)
} else {
mapValue.append(map)
mathColMapQuartile.put(col, mapValue)
}
} else if (colName.toString.startsWith("MathHistogram")) {
val value = row.getAs[String](1)
val num = row.getAs[Long](2)
val map = new java.util.HashMap[String, Long]()
val col = colName.substring(colName.indexOf('_') + 1)
map.put(value, num)
val mapValue = mathColMapHistogram.get(col)
if (mapValue == null) {
val mapValueNew = ArrayBuffer[java.util.HashMap[String, Long]]()
mapValueNew.append(map)
mathColMapHistogram.put(col, mapValueNew)
} else {
mapValue.append(map)
mathColMapHistogram.put(col, mapValue)
}
}
})
(mathColMapQuartile, mathColMapHistogram, strColMapHistogram)
}
/** Builds the histogram (bin distribution) DataFrame for a numeric column. */
def getHistogramMathDF(col : String, hiveContext: HiveContext, tableName: String, min: Double, max: Double, partNum: Int) : DataFrame = {
val len = (max - min) / partNum
log.info(s"len is : $len")
val sb = new StringBuffer()
sb.append(s"select $col, (case ")
val firstRight = min + len
sb.append(s" when ($col >= $min and $col <= $firstRight) then 1 ")
for (i <- 2 until (partNum + 1)) {
val left = min + len * (i - 1)
val right = min + len * i
sb.append(s" when ($col > $left and $col <= $right) then $i ")
}
sb.append(s" else 0 end ) as partNum from $tableName")
sb.insert(0, s"select 'MathHistogram_$col' as colName, partNum, count(1) as num from ( ")
sb.append(") temptableScala group by partNum")
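// For illustration: with col = "x", min = 0.0, max = 10.0 and partNum = 5 (hypothetical values), the SQL
// assembled above is roughly:
//   select 'MathHistogram_x' as colName, partNum, count(1) as num from (
//     select x, (case when (x >= 0.0 and x <= 2.0) then 1
//                     when (x > 2.0 and x <= 4.0) then 2
//                     ... up to bucket 5 ...
//                     else 0 end) as partNum from <tableName>
//   ) temptableScala group by partNum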
log.info("getHistogram is: " + sb.toString)
val df = hiveContext.sql(sb.toString)
df
}
def getDesfromDF(dfdesc : DataFrame, mathColArr: Array[String]):
(java.util.HashMap[String, Double], java.util.HashMap[String, Double], java.util.HashMap[String, Double], java.util.HashMap[String, Double], java.util.HashMap[String, Double])= {
val allRows = dfdesc.collect()
val colMin = new java.util.HashMap[String, Double] // minimum
val colMax = new java.util.HashMap[String, Double] // maximum
val colMean = new java.util.HashMap[String, Double] // mean
val colStddev = new java.util.HashMap[String, Double] // standard deviation
val colMedian = new java.util.HashMap[String, Double] // median
allRows.foreach(row => {
val mapKey = row.getAs[String]("summary")
for(col <- mathColArr){
if("mean".equalsIgnoreCase(mapKey)){
colMean.put(col, row.getAs[Double](col))
}else if("stddev".equalsIgnoreCase(mapKey)){
colStddev.put(col, row.getAs[Double](col))
}else if("min".equalsIgnoreCase(mapKey)){
log.info("col is " + col +", min is : "+ row.getAs[Double](col))
colMin.put(col, row.getAs[Double](col))
}else if("max".equalsIgnoreCase(mapKey)){
log.info("col is " + col +", max is : "+ row.getAs[Double](col))
colMax.put(col, row.getAs[Double](col))
}else{
// Note: describe() emits count/mean/stddev/min/max, so this branch actually captures the "count" row
// rather than a true median.
colMedian.put(col, row.getAs[Double](col))
}
}
})
(colMin, colMax, colMean, colStddev, colMedian)
}
}
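A minimal driver sketch for submitting the job, assuming the params layout implied by runforstatistic (the app name, user value and table name below are illustrative placeholders, not part of the original post):
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.hive.HiveContext
import com.alibaba.fastjson.JSON
object DataFrameVisiualizeDriver {
def main(args: Array[String]): Unit = {
val conf = new SparkConf().setAppName("DataFrameVisiualizeJob")
val sc = new SparkContext(conf)
val hiveContext = new HiveContext(sc)
// Hypothetical params: each targetType entry of type "dataset" names a key ("dataset1") whose value is the Hive table.
val params = JSON.parseObject(
"""{"user":"someuser","dataset1":"default.some_table","targetType":[{"targetType":"dataset","targetName":"dataset1"}]}""")
DataFrameVisiualize.runforstatistic(hiveContext, params)
sc.stop()
}
}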