package wikipedia

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD
import org.apache.log4j.{Level, Logger}

case class WikipediaArticle(title: String, text: String) {
  /**
   * @return Whether the text of this article mentions `lang` or not
   * @param lang Language to look for (e.g. "Scala")
   */
  def mentionsLanguage(lang: String): Boolean = text.split(' ').contains(lang)
}

object WikipediaRanking {
  // Silence Spark's verbose logging so program output stays readable
  Logger.getLogger("org").setLevel(Level.ERROR)

  val langs = List(
    "JavaScript", "Java", "PHP", "Python", "C#", "C++", "Ruby", "CSS",
    "Objective-C", "Perl", "Scala", "Haskell", "MATLAB", "Clojure", "Groovy")

  val conf: SparkConf = new SparkConf()
  val sc: SparkContext = new SparkContext("local[*]", "Wikipedia")

  // Hint: use a combination of `sc.textFile`, `WikipediaData.filePath` and `WikipediaData.parse`
  val wikiRdd: RDD[WikipediaArticle] = sc.textFile(WikipediaData.filePath).map(WikipediaData.parse)

  /** Returns the number of articles in which the language `lang` occurs.
   * Hint1: consider using method `aggregate` on RDD[T].
   * Hint2: consider using method `mentionsLanguage` on `WikipediaArticle`
   */
  def occurrencesOfLang(lang: String, rdd: RDD[WikipediaArticle]): Int =
    rdd.filter(_.mentionsLanguage(lang)).count().toInt
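  // A minimal sketch of the `aggregate` route that Hint1 suggests (the name
  // `occurrencesOfLangAgg` is ours, not part of the assignment): count the
  // matching articles within each partition, then sum the partial counts.
  def occurrencesOfLangAgg(lang: String, rdd: RDD[WikipediaArticle]): Int =
    rdd.aggregate(0)(
      (count, article) => if (article.mentionsLanguage(lang)) count + 1 else count,
      _ + _)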
  /* (1) Use `occurrencesOfLang` to compute the ranking of the languages
   * (`val langs`) by determining the number of Wikipedia articles that
   * mention each language at least once. Don't forget to sort the
   * languages by their occurrence, in decreasing order!
   *
   * Note: this operation is long-running. It can potentially run for
   * several seconds.
   */
  def rankLangs(langs: List[String], rdd: RDD[WikipediaArticle]): List[(String, Int)] = {
    rdd.cache() // keep the RDD in memory, since it is scanned once per language
    langs.map(lang => (lang, occurrencesOfLang(lang, rdd))).sortBy(_._2).reverse
    /*
     * For each element of `langs`, count the articles that mention it.
     * `sortBy(_._2)` sorts by the occurrence count, whereas `sortBy(_._1)`
     * would sort by the language name. The default order is ascending,
     * hence the trailing `.reverse`.
     */
  }
  /* Compute an inverted index of the set of articles, mapping each language
   * to the Wikipedia pages in which it occurs.
   */
  def makeIndex(langs: List[String], rdd: RDD[WikipediaArticle]): RDD[(String, Iterable[WikipediaArticle])] = {
    val articlesLanguages = rdd.flatMap(article => {
      langs.filter(lang => article.mentionsLanguage(lang))
        .map(lang => (lang, article))
    })
    articlesLanguages.groupByKey
  }
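  // Shape sketch (our illustration, not part of the assignment): an article
  // mentioning both "Scala" and "Java" is emitted twice by the flatMap, as
  // ("Scala", article) and ("Java", article); groupByKey then collects all
  // articles per language into pairs like ("Scala", Iterable(a1, a7, ...)).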
  /* (2) Compute the language ranking again, but now using the inverted
   * index. Can you notice a performance improvement?
   *
   * Note: this operation is long-running. It can potentially run for
   * several seconds.
   */
  def rankLangsUsingIndex(index: RDD[(String, Iterable[WikipediaArticle])]): List[(String, Int)] =
    index.mapValues(_.size).sortBy(-_._2).collect().toList
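  // Note: `sortBy(-_._2)` negates each count to obtain a descending order;
  // an equivalent spelling is `sortBy(_._2, ascending = false)`.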
  /* (3) Use `reduceByKey` so that the computation of the index and the
   * ranking are combined. Can you notice an improvement in performance
   * compared to measuring *both* the computation of the index and the
   * computation of the ranking? If so, can you think of a reason?
   *
   * Note: this operation is long-running. It can potentially run for
   * several seconds.
   */
  def rankLangsReduceByKey(langs: List[String], rdd: RDD[WikipediaArticle]): List[(String, Int)] = {
    rdd.flatMap(article => {
      langs.filter(article.mentionsLanguage) // same as langs.filter(lang => article.mentionsLanguage(lang))
        .map((_, 1))
    }).reduceByKey(_ + _)
      .sortBy(_._2)
      .collect()
      .toList
      .reverse
  }
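  // Design note: unlike the groupByKey-based index, `reduceByKey` pre-aggregates
  // the (lang, 1) pairs within each partition before the shuffle, so only small
  // partial counts cross the network instead of whole article collections.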
  def main(args: Array[String]): Unit = {
    /* Languages ranked according to (1) */
    val langsRanked: List[(String, Int)] = timed("Part 1: naive ranking", rankLangs(langs, wikiRdd))

    /* An inverted index mapping languages to wikipedia pages on which they appear */
    def index: RDD[(String, Iterable[WikipediaArticle])] = makeIndex(langs, wikiRdd)

    /* Languages ranked according to (2), using the inverted index */
    val langsRanked2: List[(String, Int)] = timed("Part 2: ranking using inverted index", rankLangsUsingIndex(index))

    /* Languages ranked according to (3) */
    val langsRanked3: List[(String, Int)] = timed("Part 3: ranking using reduceByKey", rankLangsReduceByKey(langs, wikiRdd))

    /* Output the speed of each ranking */
    println(timing)
    sc.stop()
  }

  val timing = new StringBuffer

  def timed[T](label: String, code: => T): T = {
    val start = System.currentTimeMillis()
    val result = code
    val stop = System.currentTimeMillis()
    timing.append(s"Processing $label took ${stop - start} ms.\n")
    result
  }
}
