https://github.com/apache/spark/tree/master/core/src/main/scala/org/apache/spark/network

https://github.com/apache/spark/blob/master/core/src/main/scala/org/apache/spark/network/BlockTransferService.scala

https://github.com/apache/spark/blob/master/core/src/main/scala/org/apache/spark/network/BlockDataManager.scala

package org.apache.spark.network

import scala.reflect.ClassTag

import org.apache.spark.network.buffer.ManagedBuffer
import org.apache.spark.storage.{BlockId, StorageLevel}

private[spark]
trait BlockDataManager {

  /**
   * Interface to get local block data. Throws an exception if the block cannot be found or
   * cannot be read successfully.
   */
  def getBlockData(blockId: BlockId): ManagedBuffer

  /**
   * Put the block locally, using the given storage level.
   *
   * Returns true if the block was stored and false if the put operation failed or the block
   * already existed.
   */
  def putBlockData(
      blockId: BlockId,
      data: ManagedBuffer,
      level: StorageLevel,
      classTag: ClassTag[_]): Boolean

  /**
   * Release locks acquired by [[putBlockData()]] and [[getBlockData()]].
   */
  def releaseLock(blockId: BlockId, taskAttemptId: Option[Long]): Unit
}
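
BlockDataManager is the contract a node exposes for serving and storing its local blocks. The following is a minimal in-memory sketch of that contract, assuming exactly the three-method trait shown above. InMemoryBlockDataManager is a hypothetical class written only for illustration (Spark's real implementation is BlockManager in org.apache.spark.storage); it ignores StorageLevel and locking, and it lives in the org.apache.spark.network package only so the private[spark] trait is visible.

package org.apache.spark.network

import java.nio.ByteBuffer

import scala.collection.concurrent.TrieMap
import scala.reflect.ClassTag

import org.apache.spark.network.buffer.{ManagedBuffer, NioManagedBuffer}
import org.apache.spark.storage.{BlockId, StorageLevel}

// Hypothetical, illustration-only implementation of BlockDataManager.
class InMemoryBlockDataManager extends BlockDataManager {

  // Blocks kept as raw bytes keyed by BlockId; StorageLevel is ignored in this sketch.
  private val blocks = TrieMap.empty[BlockId, Array[Byte]]

  override def getBlockData(blockId: BlockId): ManagedBuffer = {
    // Throw if the block is missing, as the trait's contract requires.
    val bytes = blocks.getOrElse(blockId,
      throw new IllegalStateException(s"Block $blockId not found"))
    new NioManagedBuffer(ByteBuffer.wrap(bytes))
  }

  override def putBlockData(
      blockId: BlockId,
      data: ManagedBuffer,
      level: StorageLevel,
      classTag: ClassTag[_]): Boolean = {
    // Copy the buffer's contents; return false if the block already existed.
    val src = data.nioByteBuffer()
    val bytes = new Array[Byte](src.remaining())
    src.get(bytes)
    blocks.putIfAbsent(blockId, bytes).isEmpty
  }

  override def releaseLock(blockId: BlockId, taskAttemptId: Option[Long]): Unit = {
    // No locking in this sketch; the real BlockManager tracks per-task read/write locks.
  }
}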
package org.apache.spark.network

import java.io.Closeable
import java.nio.ByteBuffer

import scala.concurrent.{Future, Promise}
import scala.concurrent.duration.Duration
import scala.reflect.ClassTag

import org.apache.spark.internal.Logging
import org.apache.spark.network.buffer.{FileSegmentManagedBuffer, ManagedBuffer, NioManagedBuffer}
import org.apache.spark.network.shuffle.{BlockFetchingListener, ShuffleClient, TempFileManager}
import org.apache.spark.storage.{BlockId, StorageLevel}
import org.apache.spark.util.ThreadUtils

private[spark]
abstract class BlockTransferService extends ShuffleClient with Closeable with Logging {

  /**
   * Initialize the transfer service by giving it the BlockDataManager that can be used to fetch
   * local blocks or put local blocks.
   */
  def init(blockDataManager: BlockDataManager): Unit

  /**
   * Tear down the transfer service.
   */
  def close(): Unit

  /**
   * Port number the service is listening on, available only after [[init]] is invoked.
   */
  def port: Int

  /**
   * Host name the service is listening on, available only after [[init]] is invoked.
   */
  def hostName: String

  /**
   * Fetch a sequence of blocks from a remote node asynchronously,
   * available only after [[init]] is invoked.
   *
   * Note that this API takes a sequence so the implementation can batch requests, and does not
   * return a future so the underlying implementation can invoke onBlockFetchSuccess as soon as
   * the data of a block is fetched, rather than waiting for all blocks to be fetched.
   */
  override def fetchBlocks(
      host: String,
      port: Int,
      execId: String,
      blockIds: Array[String],
      listener: BlockFetchingListener,
      tempFileManager: TempFileManager): Unit

  /**
   * Upload a single block to a remote node, available only after [[init]] is invoked.
   */
  def uploadBlock(
      hostname: String,
      port: Int,
      execId: String,
      blockId: BlockId,
      blockData: ManagedBuffer,
      level: StorageLevel,
      classTag: ClassTag[_]): Future[Unit]

  /**
   * A special case of [[fetchBlocks]], as it fetches only one block and is blocking.
   *
   * It is also only available after [[init]] is invoked.
   */
  def fetchBlockSync(
      host: String,
      port: Int,
      execId: String,
      blockId: String,
      tempFileManager: TempFileManager): ManagedBuffer = {
    // A monitor for the thread to wait on.
    val result = Promise[ManagedBuffer]()
    fetchBlocks(host, port, execId, Array(blockId),
      new BlockFetchingListener {
        override def onBlockFetchFailure(blockId: String, exception: Throwable): Unit = {
          result.failure(exception)
        }
        override def onBlockFetchSuccess(blockId: String, data: ManagedBuffer): Unit = {
          data match {
            case f: FileSegmentManagedBuffer =>
              result.success(f)
            case _ =>
              val ret = ByteBuffer.allocate(data.size.toInt)
              ret.put(data.nioByteBuffer())
              ret.flip()
              result.success(new NioManagedBuffer(ret))
          }
        }
      }, tempFileManager)
    ThreadUtils.awaitResult(result.future, Duration.Inf)
  }

  /**
   * Upload a single block to a remote node, available only after [[init]] is invoked.
   *
   * This method is similar to [[uploadBlock]], except this one blocks the thread
   * until the upload finishes.
   */
  def uploadBlockSync(
      hostname: String,
      port: Int,
      execId: String,
      blockId: BlockId,
      blockData: ManagedBuffer,
      level: StorageLevel,
      classTag: ClassTag[_]): Unit = {
    val future = uploadBlock(hostname, port, execId, blockId, blockData, level, classTag)
    ThreadUtils.awaitResult(future, Duration.Inf)
  }
}
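
The interesting part of fetchBlockSync is how it bridges the callback-based fetchBlocks API into a blocking call: it creates a Promise, completes it from the BlockFetchingListener callbacks, and then waits on the resulting Future. The self-contained sketch below reproduces the same pattern outside Spark; FetchListener, fetchAsync, and fetchSync are hypothetical stand-ins invented here so the shape of the bridging is easy to see, not Spark APIs.

import scala.concurrent.{Await, Promise}
import scala.concurrent.duration.Duration

object FetchSyncSketch extends App {

  // An asynchronous API that reports results through a callback, like fetchBlocks.
  trait FetchListener {
    def onSuccess(id: String, data: Array[Byte]): Unit
    def onFailure(id: String, e: Throwable): Unit
  }

  def fetchAsync(id: String, listener: FetchListener): Unit = {
    // Stand-in for a network fetch; completes the callback immediately.
    listener.onSuccess(id, Array[Byte](1, 2, 3))
  }

  // Bridge the callback into a blocking call with a Promise, the same shape as fetchBlockSync.
  def fetchSync(id: String): Array[Byte] = {
    val result = Promise[Array[Byte]]()
    fetchAsync(id, new FetchListener {
      override def onSuccess(id: String, data: Array[Byte]): Unit = result.success(data)
      override def onFailure(id: String, e: Throwable): Unit = result.failure(e)
    })
    // Spark uses ThreadUtils.awaitResult here; plain Await is enough for the sketch.
    Await.result(result.future, Duration.Inf)
  }

  println(fetchSync("shuffle_0_0_0").toSeq)  // Vector(1, 2, 3)
}

uploadBlockSync follows the same shape, except uploadBlock already returns a Future, so no Promise is needed before awaiting.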
