Hadoop fs -put with bandwidth limiting (brute-force version)
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
 */

// scalastyle:off println
package com.weibo.tools

import java.io.{BufferedInputStream, FileInputStream}
import java.net.URI
import java.util.concurrent.TimeUnit

import org.apache.hadoop.conf.{Configuration => hdfsConfig}
import org.apache.hadoop.fs.{FileStatus, FileSystem, Path}
import org.apache.hadoop.io.IOUtils
import org.apache.spark.{SparkConf, SparkContext}

object Bandwidthlimited_local2HDFS_Writer {
  val kiloByte = 1024

  // Uploads a single buffer and sleeps ~1s, so each call moves at most one
  // buffer per second. Currently unused: LocalLog2HDFS_Writer inlines the
  // same logic.
  def upload_one_buffer(inStream: java.io.BufferedInputStream,
                        outputStream: org.apache.hadoop.fs.FSDataOutputStream,
                        log_buffer: Array[Byte],
                        pre_buffer_sum: Long,
                        totalSize: Long): Long = {
    val readSize = inStream.read(log_buffer)
    val buffer_sum = pre_buffer_sum + readSize
    // Write only the bytes actually read, without copying the array.
    outputStream.write(log_buffer, 0, readSize)
    outputStream.flush()
    TimeUnit.MILLISECONDS.sleep(999)
    // println(s"${inStream} uploading. ${buffer_sum} uploaded. readSize : ${readSize}. ${buffer_sum * 100 / totalSize}% finished. ")
    buffer_sum
  }
  def LocalLog2HDFS_Writer(sc: SparkContext,
                           localSrcPath: String,
                           remoteTarPath: String,
                           bandwidth: String): Long = {
    sc.hadoopConfiguration.setBoolean("dfs.support.append", true)
    val hdfs = FileSystem.get(new URI("/"), sc.hadoopConfiguration)
    val filePath = new Path(remoteTarPath)
    val inStream = new BufferedInputStream(new FileInputStream(localSrcPath))
    // available() returns an Int, which is what caps reliable uploads at
    // ~1.999 GB (see issue 4 below).
    val totalSize = inStream.available()
    // Create the target if it is missing so the append below always works.
    if (!hdfs.exists(filePath)) hdfs.create(filePath).close()
    else println(hdfs.getFileStatus(filePath).toString)
    val outputStream = hdfs.append(filePath)
    // One buffer of `bandwidth` KB per ~1s sleep gives an average rate of
    // roughly `bandwidth` KB/s.
    val buffer_size = kiloByte * bandwidth.toInt
    val log_buffer = new Array[Byte](buffer_size)
    var buffer_sum = 0L
    try {
      // Full buffers: write, force to the datanodes, then sleep ~1s.
      while (inStream.available() >= buffer_size) {
        val readSize = inStream.read(log_buffer)
        buffer_sum += readSize
        outputStream.write(log_buffer, 0, readSize)
        outputStream.flush()
        outputStream.hflush()
        println(s"${localSrcPath} uploading. ${buffer_sum} uploaded. readSize : ${readSize}. ${buffer_sum * 100 / totalSize}% finished. ")
        TimeUnit.MILLISECONDS.sleep(999)
      }
      // Drain the remaining partial buffer, if any.
      if (inStream.available() > 0) {
        val readSize = inStream.read(log_buffer)
        buffer_sum += readSize
        outputStream.write(log_buffer, 0, readSize)
        outputStream.flush()
        println(s"${localSrcPath} uploading. ${buffer_sum} uploaded. readSize : ${readSize}. ${buffer_sum * 100 / totalSize}% finished. ")
      }
    } finally {
      inStream.close()
      outputStream.close()
    }
    buffer_sum
  }
  def Local2HDFS_Writer(sc: SparkContext, args: Array[String]): Long = {
    val helper_info =
      """The file at localSrcPath must be smaller than 1.999 GB.
        |Usage: Bandwidthlimited_local2HDFS_Writer localSrcPath remoteTarPath bandwidth (in KB/s, e.g. 10)""".stripMargin
    println(helper_info)
    require(args.size >= 3, helper_info)
    val localSrcPath = args(0)
    val remoteTarPath = args(1)
    val bandwidth = args(2)
    LocalLog2HDFS_Writer(sc, localSrcPath, remoteTarPath, bandwidth)
  }
  def LocalLogReducer2HDFS(sc: SparkContext, taskList: List[(String, String)], bandwidth: String): Int = {
    var sum = 0
    // foreach, not iterator.map: the lazy iterator in the original was never
    // consumed, so no upload ever ran and sum stayed 0.
    taskList.foreach {
      case (localSrcPath, remoteTarPath) =>
        LocalLog2HDFS_Writer(sc, localSrcPath, remoteTarPath, bandwidth)
        sum += 1
    }
    sum
  }
  // Stub, left unimplemented in the original.
  def LocalLogReducer(sc: SparkContext, srcParentPath: String, bandwidth: String) = {}

  def main(args: Array[String]) {
    val conf = new SparkConf()
      .setAppName("Bandwidthlimited_local2HDFS_Writer")
      .setMaster("local[1]")
    val sc = new SparkContext(conf)
    Local2HDFS_Writer(sc, args)
    sc.stop()
  }
}
https://github.com/Suanec/Betn_repo/blob/32d56acd3b57efc15573389619ed7793efdf298c/joyCodes/assembly_lib/src/main/scala/Bandwidthlimited_local2HDFS_Writer.scala
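For reference, a minimal sketch of how these entry points might be driven; the paths and the 10 KB/s limit below are hypothetical, not taken from the original:

import org.apache.spark.{SparkConf, SparkContext}
import com.weibo.tools.Bandwidthlimited_local2HDFS_Writer

object UploadExample {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(
      new SparkConf().setAppName("upload-example").setMaster("local[1]"))
    // Single file, throttled to ~10 KB/s.
    Bandwidthlimited_local2HDFS_Writer.Local2HDFS_Writer(
      sc, Array("/tmp/app.log", "/logs/app.log", "10"))
    // Several files uploaded sequentially under the same limit.
    Bandwidthlimited_local2HDFS_Writer.LocalLogReducer2HDFS(
      sc, List(("/tmp/a.log", "/logs/a.log"), ("/tmp/b.log", "/logs/b.log")), "10")
    sc.stop()
  }
}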
Brute-force version: to get the feature working first, this uses Spark + Scala on top of the Hadoop API to implement a bandwidth-limited upload. Known issues:
1. The HDFS documentation describes append as inherently unsafe, and it is not recommended in production environments.
2. Throttling is implemented by limiting reads and writes on the stream, so the transfer rate may oscillate, but the average matches the target.
3. Note that the bandwidth limit is expressed in KB per second.
4. File size is constrained by the input stream: only files up to 1.999 GB are currently guaranteed to work. Beyond that, progress monitoring may fail, and duplicate uploads or corrupted data may occur (a possible workaround is sketched below).
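A sketch of one way to lift the 1.999 GB limit of issue 4 (my assumption, not part of the original code): take the total size from java.io.File.length(), which returns a Long, and loop on the return value of read() instead of trusting available():

import java.io.{BufferedInputStream, File, FileInputStream}
import java.util.concurrent.TimeUnit
import org.apache.hadoop.fs.FSDataOutputStream

// Hypothetical replacement for the upload loop: stops at end-of-stream
// (read() == -1) rather than on available(), so byte counts and progress
// percentages stay correct past 2 GB. The caller still owns outputStream.
def uploadThrottled(localSrcPath: String,
                    outputStream: FSDataOutputStream,
                    bufferSize: Int): Long = {
  val totalSize = new File(localSrcPath).length() // Long, no 2 GB cap
  val inStream = new BufferedInputStream(new FileInputStream(localSrcPath))
  val buffer = new Array[Byte](bufferSize)
  var uploaded = 0L
  try {
    var readSize = inStream.read(buffer)
    while (readSize != -1) {
      outputStream.write(buffer, 0, readSize)
      outputStream.hflush()
      uploaded += readSize
      println(s"$localSrcPath uploading. $uploaded of $totalSize bytes (${uploaded * 100 / totalSize}%).")
      TimeUnit.MILLISECONDS.sleep(999) // ~bufferSize bytes per second
      readSize = inStream.read(buffer)
    }
  } finally {
    inStream.close()
  }
  uploaded
}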