Spark Communication Internals
https://github.com/apache/spark/tree/master/core/src/main/scala/org/apache/spark/network
https://github.com/apache/spark/blob/master/core/src/main/scala/org/apache/spark/network/BlockTransferService.scala
https://github.com/apache/spark/blob/master/core/src/main/scala/org/apache/spark/network/BlockDataManager.scala
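The two source files linked above define Spark's block-transfer abstraction: BlockDataManager exposes local blocks (get, put, release lock) to the network layer, while BlockTransferService is the abstract client/server used to fetch and upload blocks between nodes. Their contents are reproduced below.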
package org.apache.spark.network

import scala.reflect.ClassTag

import org.apache.spark.network.buffer.ManagedBuffer
import org.apache.spark.storage.{BlockId, StorageLevel}

private[spark]
trait BlockDataManager {

  /**
   * Interface to get local block data. Throws an exception if the block cannot be found or
   * cannot be read successfully.
   */
  def getBlockData(blockId: BlockId): ManagedBuffer

  /**
   * Put the block locally, using the given storage level.
   *
   * Returns true if the block was stored and false if the put operation failed or the block
   * already existed.
   */
  def putBlockData(
      blockId: BlockId,
      data: ManagedBuffer,
      level: StorageLevel,
      classTag: ClassTag[_]): Boolean

  /**
   * Release locks acquired by [[putBlockData()]] and [[getBlockData()]].
   */
  def releaseLock(blockId: BlockId, taskAttemptId: Option[Long]): Unit
}
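To make the BlockDataManager contract concrete, here is a minimal sketch of an implementation backed by an in-memory map. The class name InMemoryBlockDataManager and the ConcurrentHashMap store are assumptions for illustration only; in Spark the real implementation is the BlockManager, which adds memory/disk tiers, eviction, and per-block locking. Because the trait is private[spark], such code can only compile inside the org.apache.spark namespace.

package org.apache.spark.network

import java.nio.ByteBuffer
import java.util.concurrent.ConcurrentHashMap

import scala.reflect.ClassTag

import org.apache.spark.network.buffer.{ManagedBuffer, NioManagedBuffer}
import org.apache.spark.storage.{BlockId, StorageLevel}

// Illustrative only: a BlockDataManager that keeps every block on the JVM heap.
class InMemoryBlockDataManager extends BlockDataManager {

  private val blocks = new ConcurrentHashMap[BlockId, ManagedBuffer]()

  // Throws if the block is unknown, matching the documented contract above.
  override def getBlockData(blockId: BlockId): ManagedBuffer = {
    val buf = blocks.get(blockId)
    if (buf == null) {
      throw new IllegalStateException(s"Block $blockId not found")
    }
    buf
  }

  // Returns true only when the block was newly stored; false if it already existed.
  override def putBlockData(
      blockId: BlockId,
      data: ManagedBuffer,
      level: StorageLevel,
      classTag: ClassTag[_]): Boolean = {
    // Copy into a heap ByteBuffer so the caller may release its own buffer afterwards.
    val copy = ByteBuffer.allocate(data.size.toInt)
    copy.put(data.nioByteBuffer())
    copy.flip()
    blocks.putIfAbsent(blockId, new NioManagedBuffer(copy)) == null
  }

  // No locking in this sketch; the real BlockManager tracks read/write locks per block.
  override def releaseLock(blockId: BlockId, taskAttemptId: Option[Long]): Unit = ()
}

The second file, BlockTransferService, builds on this interface: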
package org.apache.spark.network

import java.io.Closeable
import java.nio.ByteBuffer

import scala.concurrent.{Future, Promise}
import scala.concurrent.duration.Duration
import scala.reflect.ClassTag

import org.apache.spark.internal.Logging
import org.apache.spark.network.buffer.{FileSegmentManagedBuffer, ManagedBuffer, NioManagedBuffer}
import org.apache.spark.network.shuffle.{BlockFetchingListener, ShuffleClient, TempFileManager}
import org.apache.spark.storage.{BlockId, StorageLevel}
import org.apache.spark.util.ThreadUtils

private[spark]
abstract class BlockTransferService extends ShuffleClient with Closeable with Logging {

  /**
   * Initialize the transfer service by giving it the BlockDataManager that can be used to fetch
   * local blocks or put local blocks.
   */
  def init(blockDataManager: BlockDataManager): Unit

  /**
   * Tear down the transfer service.
   */
  def close(): Unit

  /**
   * Port number the service is listening on, available only after [[init]] is invoked.
   */
  def port: Int

  /**
   * Host name the service is listening on, available only after [[init]] is invoked.
   */
  def hostName: String

  /**
   * Fetch a sequence of blocks from a remote node asynchronously,
   * available only after [[init]] is invoked.
   *
   * Note that this API takes a sequence so the implementation can batch requests, and does not
   * return a future so the underlying implementation can invoke onBlockFetchSuccess as soon as
   * the data of a block is fetched, rather than waiting for all blocks to be fetched.
   */
  override def fetchBlocks(
      host: String,
      port: Int,
      execId: String,
      blockIds: Array[String],
      listener: BlockFetchingListener,
      tempFileManager: TempFileManager): Unit

  /**
   * Upload a single block to a remote node, available only after [[init]] is invoked.
   */
  def uploadBlock(
      hostname: String,
      port: Int,
      execId: String,
      blockId: BlockId,
      blockData: ManagedBuffer,
      level: StorageLevel,
      classTag: ClassTag[_]): Future[Unit]

  /**
   * A special case of [[fetchBlocks]], as it fetches only one block and is blocking.
   *
   * It is also only available after [[init]] is invoked.
   */
  def fetchBlockSync(
      host: String,
      port: Int,
      execId: String,
      blockId: String,
      tempFileManager: TempFileManager): ManagedBuffer = {
    // A monitor for the thread to wait on.
    val result = Promise[ManagedBuffer]()
    fetchBlocks(host, port, execId, Array(blockId),
      new BlockFetchingListener {
        override def onBlockFetchFailure(blockId: String, exception: Throwable): Unit = {
          result.failure(exception)
        }
        override def onBlockFetchSuccess(blockId: String, data: ManagedBuffer): Unit = {
          data match {
            case f: FileSegmentManagedBuffer =>
              result.success(f)
            case _ =>
              val ret = ByteBuffer.allocate(data.size.toInt)
              ret.put(data.nioByteBuffer())
              ret.flip()
              result.success(new NioManagedBuffer(ret))
          }
        }
      }, tempFileManager)
    ThreadUtils.awaitResult(result.future, Duration.Inf)
  }

  /**
   * Upload a single block to a remote node, available only after [[init]] is invoked.
   *
   * This method is similar to [[uploadBlock]], except this one blocks the thread
   * until the upload finishes.
   */
  def uploadBlockSync(
      hostname: String,
      port: Int,
      execId: String,
      blockId: BlockId,
      blockData: ManagedBuffer,
      level: StorageLevel,
      classTag: ClassTag[_]): Unit = {
    val future = uploadBlock(hostname, port, execId, blockId, blockData, level, classTag)
    ThreadUtils.awaitResult(future, Duration.Inf)
  }
}
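As a usage sketch, the snippet below shows how a caller might drive the two interfaces together: initialize the transfer service with a local BlockDataManager, fetch one block synchronously from a remote executor, then store it locally. The object BlockTransferExample, the function mirrorBlock, and its parameters are hypothetical; in Spark these calls are made internally by the BlockManager, and the concrete BlockTransferService is NettyBlockTransferService. Passing null for the TempFileManager is an assumption of this sketch for the case where the fetched data does not need to be spilled to disk.

// Illustrative only: `transferService` is assumed to be a concrete, already-constructed
// BlockTransferService and `localBlocks` a BlockDataManager such as the sketch above.
// Both types are private[spark], so real callers live inside Spark itself.
import org.apache.spark.network.{BlockDataManager, BlockTransferService}
import org.apache.spark.network.buffer.ManagedBuffer
import org.apache.spark.storage.{BlockId, StorageLevel}

object BlockTransferExample {

  def mirrorBlock(
      transferService: BlockTransferService,
      localBlocks: BlockDataManager,
      remoteHost: String,
      remotePort: Int,
      remoteExecId: String,
      blockId: BlockId): Unit = {
    // The service must be initialized before any fetch or upload call;
    // port and hostName are likewise only meaningful after init.
    transferService.init(localBlocks)

    // Blocking fetch of a single block; internally this wraps the asynchronous
    // fetchBlocks call with a Promise, as shown in fetchBlockSync above.
    val data: ManagedBuffer =
      transferService.fetchBlockSync(remoteHost, remotePort, remoteExecId, blockId.name,
        tempFileManager = null)

    // Hand the fetched bytes to local storage through the BlockDataManager interface.
    localBlocks.putBlockData(blockId, data, StorageLevel.MEMORY_ONLY,
      scala.reflect.classTag[Array[Byte]])
  }
}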