Spark installation directory

/Users/erichan/Garden/spark-1.4.0-bin-hadoop2.6
  • Basic test (local[N] means run Spark locally with N worker threads)
./bin/run-example org.apache.spark.examples.SparkPi
MASTER=local[20] ./bin/run-example org.apache.spark.examples.SparkPi
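
All three example programs below read data/UserPurchaseHistory.csv and treat each line as user,product,price. A small sample file in that format might look like this (the rows here are only illustrative):

John,iPhone Cover,9.99
John,Headphones,5.49
Jack,iPhone Cover,9.99
Jill,Samsung Galaxy Cover,8.95
Bob,iPad Cover,5.49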

Scala

import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._

/**
 * A simple Spark app in Scala
 */
object ScalaApp {
  def main(args: Array[String]) {
    val sc = new SparkContext("local[2]", "First Spark App")
    // load the raw CSV and split each line into a (user, product, price) tuple
    val data = sc.textFile("data/UserPurchaseHistory.csv")
      .map(line => line.split(","))
      .map(purchaseRecord => (purchaseRecord(0), purchaseRecord(1), purchaseRecord(2)))
    // total number of purchase records
    val numPurchases = data.count()
    // number of distinct users
    val uniqueUsers = data.map { case (user, product, price) => user }.distinct().count()
    // sum of all purchase prices
    val totalRevenue = data.map { case (user, product, price) => price.toDouble }.sum()
    // purchase count per product, sorted descending by count
    val productsByPopularity = data
      .map { case (user, product, price) => (product, 1) }
      .reduceByKey(_ + _)
      .collect()
      .sortBy(-_._2)
    val mostPopular = productsByPopularity(0)
    println("Total purchases: " + numPurchases)
    println("Unique users: " + uniqueUsers)
    println("Total revenue: " + totalRevenue)
    println("Most popular product: %s with %d purchases".format(mostPopular._1, mostPopular._2))
    sc.stop()
  }
}

build.sbt

name := "scala-spark-app"

version := "1.0"

scalaVersion := "2.11.6"

libraryDependencies += "org.apache.spark" %% "spark-core" % "1.4.0"

erichan:scala-spark-app/ $ sbt run
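
sbt run executes the app inside sbt itself. To submit it to the local Spark installation instead, one option is to package a jar and hand it to spark-submit; this is a sketch, and the jar path assumes sbt's default naming for this build.sbt (name_scalaBinaryVersion-version.jar):

sbt package
/Users/erichan/Garden/spark-1.4.0-bin-hadoop2.6/bin/spark-submit \
  --class ScalaApp target/scala-2.11/scala-spark-app_2.11-1.0.jar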

Java 8

import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.PairFunction;
import scala.Tuple2;

import java.util.List;

public class JavaApp {
    public static void main(String[] args) {
        JavaSparkContext sc = new JavaSparkContext("local[2]", "First Spark App");
        // split each CSV line into [user, product, price]
        JavaRDD<String[]> data = sc.textFile("data/UserPurchaseHistory.csv").map(s -> s.split(","));
        long numPurchases = data.count();
        long uniqueUsers = data.map(strings -> strings[0]).distinct().count();
        double totalRevenue = data.mapToDouble(strings -> Double.parseDouble(strings[2])).sum();
        // purchase count per product
        List<Tuple2<String, Integer>> pairs = data.mapToPair(
                new PairFunction<String[], String, Integer>() {
                    @Override
                    public Tuple2<String, Integer> call(String[] strings) throws Exception {
                        return new Tuple2<>(strings[1], 1);
                    }
                }
        ).reduceByKey((i1, i2) -> i1 + i2).collect();
        // sort descending by purchase count
        pairs.sort((o1, o2) -> -(o1._2() - o2._2()));
        String mostPopular = pairs.get(0)._1();
        int purchases = pairs.get(0)._2();
        System.out.println("Total purchases: " + numPurchases);
        System.out.println("Unique users: " + uniqueUsers);
        System.out.println("Total revenue: " + totalRevenue);
        System.out.println(String.format("Most popular product: %s with %d purchases", mostPopular, purchases));
        sc.stop();
    }
}

Maven pom.xml

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>java-spark-app</groupId>
    <artifactId>java-spark-app</artifactId>
    <version>1.0</version>
    <dependencies>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-core_2.11</artifactId>
            <version>1.4.0</version>
        </dependency>
    </dependencies>
    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <version>3.1</version>
                <configuration>
                    <source>1.8</source>
                    <target>1.8</target>
                </configuration>
            </plugin>
        </plugins>
    </build>
</project>
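
As with the Scala app, the Java app can be packaged and submitted to the local Spark installation; a sketch, assuming Maven's default artifactId-version jar naming:

mvn package
/Users/erichan/Garden/spark-1.4.0-bin-hadoop2.6/bin/spark-submit \
  --class JavaApp target/java-spark-app-1.0.jar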

Python

from pyspark import SparkContext

sc = SparkContext("local[2]", "First Spark App")
# split each CSV line into a (user, product, price) tuple
data = sc.textFile("data/UserPurchaseHistory.csv") \
    .map(lambda line: line.split(",")) \
    .map(lambda record: (record[0], record[1], record[2]))
numPurchases = data.count()
uniqueUsers = data.map(lambda record: record[0]).distinct().count()
totalRevenue = data.map(lambda record: float(record[2])).sum()
# purchase count per product
products = data.map(lambda record: (record[1], 1.0)).reduceByKey(lambda a, b: a + b).collect()
mostPopular = sorted(products, key=lambda x: x[1], reverse=True)[0]
print("Total purchases: %d" % numPurchases)
print("Unique users: %d" % uniqueUsers)
print("Total revenue: %2.2f" % totalRevenue)
print("Most popular product: %s with %d purchases" % (mostPopular[0], mostPopular[1]))
sc.stop()

cd /Users/erichan/Garden/spark-1.4.0-bin-hadoop2.6/bin
./spark-submit pythonapp.py
