spark源码阅读之network(2)
在上一节的解读中发现,Spark 的源码大量使用了 Netty 中 buffer 部分的 API。本节将介绍 Netty 核心的一些 API,比如 channel:
privatestaticclassClientPool{
TransportClient[] clients;
Object[] locks;
publicClientPool(int size){
clients =newTransportClient[size];
locks =newObject[size];
for(int i =0; i < size; i++){
locks[i]=newObject();
}
}
publicTransportClient createClient(String remoteHost,int remotePort)throwsIOException{
// Get connection from the connection pool first.
// If it is not found or not active, create a new one.
finalInetSocketAddress address =newInetSocketAddress(remoteHost, remotePort);
// Create the ClientPool if we don't have it yet.
ClientPool clientPool = connectionPool.get(address);
if(clientPool ==null){
connectionPool.putIfAbsent(address,newClientPool(numConnectionsPerPeer));
clientPool = connectionPool.get(address);
}
int clientIndex = rand.nextInt(numConnectionsPerPeer);
TransportClient cachedClient = clientPool.clients[clientIndex];
if(cachedClient !=null&& cachedClient.isActive()){
logger.trace("Returning cached connection to {}: {}", address, cachedClient);
return cachedClient;
}
// If we reach here, we don't have an existing connection open. Let's create a new one.
// Multiple threads might race here to create new connections. Keep only one of them active.
synchronized(clientPool.locks[clientIndex]){
cachedClient = clientPool.clients[clientIndex];
if(cachedClient !=null){
if(cachedClient.isActive()){
logger.trace("Returning cached connection to {}: {}", address, cachedClient);
return cachedClient;
}else{
logger.info("Found inactive connection to {}, creating a new one.", address);
}
}
clientPool.clients[clientIndex]= createClient(address);
return clientPool.clients[clientIndex];
}
}
*/
publicTransportClient createUnmanagedClient(String remoteHost,int remotePort)
throwsIOException{
finalInetSocketAddress address =newInetSocketAddress(remoteHost, remotePort);
return createClient(address);
}
/** Create a completely new {@link TransportClient} to the remote address. */
privateTransportClient createClient(InetSocketAddress address)throwsIOException{
logger.debug("Creating new connection to "+ address);
Bootstrap bootstrap =newBootstrap();
bootstrap.group(workerGroup)
.channel(socketChannelClass)
// Disable Nagle's Algorithm since we don't want packets to wait
.option(ChannelOption.TCP_NODELAY,true)
.option(ChannelOption.SO_KEEPALIVE,true)
.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, conf.connectionTimeoutMs())
.option(ChannelOption.ALLOCATOR, pooledAllocator);
finalAtomicReference<TransportClient> clientRef =newAtomicReference<TransportClient>();
finalAtomicReference<Channel> channelRef =newAtomicReference<Channel>();
bootstrap.handler(newChannelInitializer<SocketChannel>(){
@Override
publicvoid initChannel(SocketChannel ch){
TransportChannelHandler clientHandler = context.initializePipeline(ch);
clientRef.set(clientHandler.getClient());
channelRef.set(ch);
}
});
// Connect to the remote server
long preConnect =System.nanoTime();
ChannelFuture cf = bootstrap.connect(address);
if(!cf.awaitUninterruptibly(conf.connectionTimeoutMs())){
thrownewIOException(
String.format("Connecting to %s timed out (%s ms)", address, conf.connectionTimeoutMs()));
}elseif(cf.cause()!=null){
thrownewIOException(String.format("Failed to connect to %s", address), cf.cause());
}
TransportClient client = clientRef.get();
Channel channel = channelRef.get();
assert client !=null:"Channel future completed successfully with null client";
// Execute any client bootstraps synchronously before marking the Client as successful.
long preBootstrap =System.nanoTime();
logger.debug("Connection to {} successful, running bootstraps...", address);
try{
for(TransportClientBootstrap clientBootstrap : clientBootstraps){
clientBootstrap.doBootstrap(client, channel);
}
}catch(Exception e){// catch non-RuntimeExceptions too as bootstrap may be written in Scala
long bootstrapTimeMs =(System.nanoTime()- preBootstrap)/1000000;
logger.error("Exception while bootstrapping client after "+ bootstrapTimeMs +" ms", e);
client.close();
throwThrowables.propagate(e);
}
long postBootstrap =System.nanoTime();
logger.debug("Successfully created connection to {} after {} ms ({} ms spent in bootstraps)",
address,(postBootstrap - preConnect)/1000000,(postBootstrap - preBootstrap)/1000000);
return client;
}
privatefinalChannel channel;
privatefinalTransportResponseHandler handler;
@NullableprivateString clientId;
publicvoid fetchChunk(
long streamId,
finalint chunkIndex,
finalChunkReceivedCallback callback){
finalString serverAddr =NettyUtils.getRemoteAddress(channel);
finallong startTime =System.currentTimeMillis();
logger.debug("Sending fetch chunk request {} to {}", chunkIndex, serverAddr);
finalStreamChunkId streamChunkId =newStreamChunkId(streamId, chunkIndex);
handler.addFetchRequest(streamChunkId, callback);
channel.writeAndFlush(newChunkFetchRequest(streamChunkId)).addListener(
newChannelFutureListener(){
@Override
publicvoid operationComplete(ChannelFuture future)throwsException{
if(future.isSuccess()){
long timeTaken =System.currentTimeMillis()- startTime;
logger.trace("Sending request {} to {} took {} ms", streamChunkId, serverAddr,
timeTaken);
}else{
String errorMsg =String.format("Failed to send request %s to %s: %s", streamChunkId,
serverAddr, future.cause());
logger.error(errorMsg, future.cause());
handler.removeFetchRequest(streamChunkId);
channel.close();
try{
callback.onFailure(chunkIndex,newIOException(errorMsg, future.cause()));
}catch(Exception e){
logger.error("Uncaught exception in RPC response callback handler!", e);
}
}
}
});
}
publicvoid stream(finalString streamId,finalStreamCallback callback){
finalString serverAddr =NettyUtils.getRemoteAddress(channel);
finallong startTime =System.currentTimeMillis();
logger.debug("Sending stream request for {} to {}", streamId, serverAddr);
// Need to synchronize here so that the callback is added to the queue and the RPC is
// written to the socket atomically, so that callbacks are called in the right order
// when responses arrive.
synchronized(this){
handler.addStreamCallback(callback);
channel.writeAndFlush(newStreamRequest(streamId)).addListener(
newChannelFutureListener(){
@Override
publicvoid operationComplete(ChannelFuture future)throwsException{
if(future.isSuccess()){
long timeTaken =System.currentTimeMillis()- startTime;
logger.trace("Sending request for {} to {} took {} ms", streamId, serverAddr,
timeTaken);
}else{
String errorMsg =String.format("Failed to send request for %s to %s: %s", streamId,
serverAddr, future.cause());
logger.error(errorMsg, future.cause());
channel.close();
try{
callback.onFailure(streamId,newIOException(errorMsg, future.cause()));
}catch(Exception e){
logger.error("Uncaught exception in RPC response callback handler!", e);
}
}
}
});
}
}
publicbyte[] sendRpcSync(byte[] message,long timeoutMs){
finalSettableFuture<byte[]> result =SettableFuture.create();
sendRpc(message,newRpcResponseCallback(){
@Override
publicvoid onSuccess(byte[] response){
result.set(response);
}
@Override
publicvoid onFailure(Throwable e){
result.setException(e);
}
});
try{
return result.get(timeoutMs,TimeUnit.MILLISECONDS);
}catch(ExecutionException e){
throwThrowables.propagate(e.getCause());
}catch(Exception e){
throwThrowables.propagate(e);
}
}
spark源码阅读之network(2)的更多相关文章
- spark源码阅读之network(1)
spark将在1.6中替换掉akka,而采用netty实现整个集群的rpc的框架,netty的内存管理和NIO支持将有效的提高spark集群的网络传输能力,为了看懂这块代码,在网上找了两本书看< ...
- spark源码阅读之network(3)
TransportContext用来创建TransportServer和TransportclientFactory,同时使用TransportChannelHandler用来配置channel的pi ...
- Spark源码阅读之存储体系--存储体系概述与shuffle服务
一.概述 根据<深入理解Spark:核心思想与源码分析>一书,结合最新的spark源代码master分支进行源码阅读,对新版本的代码加上自己的一些理解,如有错误,希望指出. 1.块管理器B ...
- win7+idea+maven搭建spark源码阅读环境
1.参考. 利用IDEA工具编译Spark源码(1.60~2.20) https://blog.csdn.net/He11o_Liu/article/details/78739699 Maven编译打 ...
- spark源码阅读
根据spark2.2的编译顺序来确定源码阅读顺序,只阅读核心的基本部分. 1.common目录 ①Tags②Sketch③Networking④Shuffle Streaming Service⑤Un ...
- emacs+ensime+sbt打造spark源码阅读环境
欢迎转载,转载请注明出处,徽沪一郎. 概述 Scala越来越流行, Spark也愈来愈红火, 对spark的代码进行走读也成了一个很普遍的行为.不巧的是,当前java社区中很流行的ide如eclips ...
- spark源码阅读---Utils.getCallSite
1 作用 当该方法在spark内部代码中调用时,会返回当前调用spark代码的用户类的名称,以及其所调用的spark方法.所谓用户类,就是我们这些用户使用spark api的类. 2 内部实现 2.1 ...
- spark源码阅读--SparkContext启动过程
##SparkContext启动过程 基于spark 2.1.0 scala 2.11.8 spark源码的体系结构实在是很庞大,从使用spark-submit脚本提交任务,到向yarn申请容器,启 ...
- Spark源码阅读(1): Stage划分
Spark中job由action动作生成,那么stage是如何划分的呢?一般的解答是根据宽窄依赖划分.那么我们深入源码看看吧 一个action 例如count,会在多次runJob中传递,最终会到一个 ...
随机推荐
- C++输入流和输出流、缓冲区
一.C++输入流和输出流 输入和输出的概念是相对程序而言的. 键盘输入数据到程序叫标准输入,程序数据输出到显示器叫标准输出,标准输入和标准输出统称为标准I/O,文件的输入和输出叫文件I/O. cout ...
- 使用BasicDataSource连接池连接oracle数据库报错ORA-12505
先看连接池配置: <bean id="dataSource" class="org.apache.commons.dbcp.BasicDataSource" ...
- erlang的dict和maps模块
erlang在r17以后增加了map这个数据结构,在之前,类似map的需求用dict模块来实现,这里直接贴一下相关的操作 dict D = dict:new(). D1 = dict:store(k1 ...
- mysql复制原理与机制一
复制原理:复制需要二进制日志记录数据库上的改变 slave的IO线程复制把master上的Binary log读取到本地的relay log里SQL线程负责把relay log恢复到数据库数据里 sh ...
- 完全卸载oracle11g步骤:
完全卸载oracle11g步骤:1. 开始->设置->控制面板->管理工具->服务 停止所有Oracle服务.2. 开始->程序->Oracle - OraHome ...
- 字符串,字符数组(C/C++)
这个地方困惑我好久了,废话不多说 char c1[]="12345"; char *c2="12345"; string c3="12345" ...
- vue-cli中的ESlint配置文件eslintrc.js详解
本文讲解vue-cli脚手架根目录文件.eslintrc.js eslint简介 eslint是用来管理和检测js代码风格的工具,可以和编辑器搭配使用,如vscode的eslint插件 当有不符合配置 ...
- 如何查看apache加载了哪些模块
apache2/bin/apachectl -l 可以看到类似下面的结果: 这是编译时就已编译在apache中的模块,启动时自然会加载. 另外一部分,要看apach的配置文件(httpd.conf)的 ...
- js 正则匹配 小结
JS的正则表达式 rge.test(str) 检验目标对象中是否包含匹配模式,并相应的返回true或false rge.source str.search(rge) 将返回一个整数值,指明这个匹配 ...
- 浅析bootstrap原理及优缺点
网格系统的实现原理,是通过定义容器大小,平分12份(也有平分成24份或32份,但12份是最常见的),再调整内外边距,最后结合媒体查询,就制作出了强大的响应式网格系统 网格系统的实现原理,是通过定义 ...