org.apache.hadoop.fs-BlockLocation
A utility class, more or less.

package org.apache.hadoop.fs;

import org.apache.hadoop.io.*;
// Classes from the org.apache.hadoop.io package have not come up yet; each one will be analyzed as it appears.
import java.io.*;

/*
 * A BlockLocation lists hosts, offset and length
 * of block.
 */
// Records a block's metadata: the datanode hosts it lives on, plus its offset and length within the file
public class BlockLocation implements Writable {
  // Describes the location of a block across the cluster

  static { // register a ctor
    WritableFactories.setFactory
      (BlockLocation.class,
       new WritableFactory() {
         public Writable newInstance() { return new BlockLocation(); }
       });
  }
  // Registers a WritableFactory for BlockLocation (as an anonymous inner class), so the framework can
  // create instances during deserialization. For details see http://book.2cto.com/201305/21915.html
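The factory matters because Hadoop's deserialization machinery creates Writable instances through WritableFactories.newInstance rather than by calling constructors directly. A minimal sketch of that lookup once the factory above is registered (the class name FactoryDemo is made up for illustration):

import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;

public class FactoryDemo {
  public static void main(String[] args) {
    // The static block above registered a factory for BlockLocation,
    // so this returns a fresh instance produced by that factory.
    Writable w = WritableFactories.newInstance(BlockLocation.class);
    System.out.println(w.getClass().getName()); // org.apache.hadoop.fs.BlockLocation
  }
}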
  private String[] hosts;         // hostnames of the datanodes holding the block
  private String[] names;         // names of the datanodes, in hostname:portNumber form
  private String[] topologyPaths; // full path of each host in the network topology, e.g. /default-rack/host:port
  private long offset;            // offset of the block in the file
  private long length;            // length of the block

  /**
   * Default Constructor
   */
  public BlockLocation() {
    this(new String[0], new String[0], 0L, 0L);
  }
  // Default constructor: empty name and host arrays, zero offset and length
  /**
   * Constructor with host, name, offset and length
   */
  public BlockLocation(String[] names, String[] hosts, long offset,
                       long length) {
    if (names == null) {
      this.names = new String[0];
    } else {
      this.names = names;
    }
    if (hosts == null) {
      this.hosts = new String[0];
    } else {
      this.hosts = hosts;
    }
    this.offset = offset;
    this.length = length;
    this.topologyPaths = new String[0];
  }
  // Builds a block location from names, hosts, offset, and length; null arrays are normalized to empty ones
  /**
   * Constructor with host, name, network topology, offset and length
   */
  public BlockLocation(String[] names, String[] hosts, String[] topologyPaths,
                       long offset, long length) {
    this(names, hosts, offset, length);
    if (topologyPaths == null) {
      this.topologyPaths = new String[0];
    } else {
      this.topologyPaths = topologyPaths;
    }
  }
  // Same as above, but also records each host's path in the network topology
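To make the expected argument formats concrete, here is a small sketch of constructing a block location by hand (the host names, port, and rack paths are hypothetical, purely for illustration):

import org.apache.hadoop.fs.BlockLocation;

public class ConstructDemo {
  public static void main(String[] args) {
    String[] names = { "datanode1:50010", "datanode2:50010" };  // hostname:port
    String[] hosts = { "datanode1", "datanode2" };              // bare hostnames
    String[] racks = { "/default-rack/datanode1:50010",
                       "/default-rack/datanode2:50010" };       // topology paths
    BlockLocation loc = new BlockLocation(names, hosts, racks,
                                          0L, 64 * 1024 * 1024L); // first 64 MB block
    System.out.println(loc); // 0,67108864,datanode1,datanode2
  }
}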
  /**
   * Get the list of hosts (hostname) hosting this block
   */
  public String[] getHosts() throws IOException {
    if ((hosts == null) || (hosts.length == 0)) {
      return new String[0];
    } else {
      return hosts;
    }
  }
  // Returns the hostnames holding the block; never null

  /**
   * Get the list of names (hostname:port) hosting this block
   */
  public String[] getNames() throws IOException {
    if ((names == null) || (names.length == 0)) {
      return new String[0];
    } else {
      return this.names;
    }
  }
  // Same pattern for the hostname:port names

  /**
   * Get the list of network topology paths for each of the hosts.
   * The last component of the path is the host.
   */
  public String[] getTopologyPaths() throws IOException {
    if ((topologyPaths == null) || (topologyPaths.length == 0)) {
      return new String[0];
    } else {
      return this.topologyPaths;
    }
  }
  // And the same for the topology paths
  /**
   * Get the start offset of file associated with this block
   */
  public long getOffset() {
    return offset;
  }

  /**
   * Get the length of the block
   */
  public long getLength() {
    return length;
  }

  /**
   * Set the start offset of file associated with this block
   */
  public void setOffset(long offset) {
    this.offset = offset;
  }

  /**
   * Set the length of block
   */
  public void setLength(long length) {
    this.length = length;
  }

  /**
   * Set the hosts hosting this block
   */
  public void setHosts(String[] hosts) throws IOException {
    if (hosts == null) {
      this.hosts = new String[0];
    } else {
      this.hosts = hosts;
    }
  }

  /**
   * Set the names (host:port) hosting this block
   */
  public void setNames(String[] names) throws IOException {
    if (names == null) {
      this.names = new String[0];
    } else {
      this.names = names;
    }
  }

  /**
   * Set the network topology paths of the hosts
   */
  public void setTopologyPaths(String[] topologyPaths) throws IOException {
    if (topologyPaths == null) {
      this.topologyPaths = new String[0];
    } else {
      this.topologyPaths = topologyPaths;
    }
  }
  // Plain getters and setters; the array setters normalize null to an empty array
  /**
   * Implement write of Writable
   */
  public void write(DataOutput out) throws IOException {
    out.writeLong(offset);
    out.writeLong(length);
    out.writeInt(names.length);
    for (int i = 0; i < names.length; i++) {
      Text name = new Text(names[i]);
      name.write(out);
    }
    out.writeInt(hosts.length);
    for (int i = 0; i < hosts.length; i++) {
      Text host = new Text(hosts[i]);
      host.write(out);
    }
    out.writeInt(topologyPaths.length);
    for (int i = 0; i < topologyPaths.length; i++) {
      Text path = new Text(topologyPaths[i]);
      path.write(out);
    }
  }
  // Serialization: offset and length go out through DataOutput.writeLong, each array is prefixed with
  // its size via writeInt, and each string is written through the Writable subclass Text
  /**
   * Implement readFields of Writable
   */
  public void readFields(DataInput in) throws IOException {
    this.offset = in.readLong();
    this.length = in.readLong();
    int numNames = in.readInt();
    this.names = new String[numNames];
    for (int i = 0; i < numNames; i++) {
      Text name = new Text();
      name.readFields(in);
      names[i] = name.toString();
    }
    int numHosts = in.readInt();
    this.hosts = new String[numHosts]; // must be reallocated to the incoming size, or the loop overruns the old array
    for (int i = 0; i < numHosts; i++) {
      Text host = new Text();
      host.readFields(in);
      hosts[i] = host.toString();
    }
    int numTops = in.readInt();
    this.topologyPaths = new String[numTops]; // likewise reallocated before filling
    Text path = new Text();
    for (int i = 0; i < numTops; i++) {
      path.readFields(in);
      topologyPaths[i] = path.toString();
    }
  }
  // Deserialization: reads the fields back from the input stream in exactly the order write() emitted them
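A sketch of the round trip through write() and readFields(), using plain java.io streams (the block values are hypothetical):

import java.io.*;
import org.apache.hadoop.fs.BlockLocation;

public class RoundTripDemo {
  public static void main(String[] args) throws IOException {
    BlockLocation original = new BlockLocation(
        new String[] { "datanode1:50010" },                // names
        new String[] { "datanode1" },                      // hosts
        new String[] { "/default-rack/datanode1:50010" },  // topology paths
        0L, 128L);

    // Serialize with write(DataOutput)
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    original.write(new DataOutputStream(buf));

    // Deserialize into a fresh instance with readFields(DataInput)
    BlockLocation copy = new BlockLocation();
    copy.readFields(new DataInputStream(new ByteArrayInputStream(buf.toByteArray())));

    System.out.println(copy); // prints: 0,128,datanode1
  }
}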
  public String toString() {
    StringBuilder result = new StringBuilder();
    result.append(offset);
    result.append(',');
    result.append(length);
    for (String h : hosts) {
      result.append(',');
      result.append(h);
    }
    return result.toString();
  }
  // Renders the block as "offset,length,host1,host2,..."
}
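In practice you rarely construct BlockLocations by hand; the FileSystem API hands them to you. A sketch of listing the block locations of an HDFS file (the file path is a made-up example):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;

public class ListBlocks {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // Hypothetical file path; replace with a real HDFS path.
    FileStatus status = fs.getFileStatus(new Path("/data/sample.txt"));

    // One BlockLocation per block overlapping the requested byte range.
    BlockLocation[] blocks = fs.getFileBlockLocations(status, 0, status.getLen());

    for (BlockLocation blk : blocks) {
      System.out.println(blk);                   // offset,length,host1,host2,...
      for (String path : blk.getTopologyPaths()) {
        System.out.println("  " + path);         // e.g. /default-rack/host:port
      }
    }
  }
}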