/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.util.Arrays;
import java.util.Random;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.util.BloomFilterFactory;
import org.apache.hadoop.io.BytesWritable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLineParser;
import org.apache.hbase.thirdparty.org.apache.commons.cli.HelpFormatter;
import org.apache.hbase.thirdparty.org.apache.commons.cli.Options;
import org.apache.hbase.thirdparty.org.apache.commons.cli.ParseException;
import org.apache.hbase.thirdparty.org.apache.commons.cli.PosixParser;

/**
 * Creates an HFile with random key/value pairs.
 */
public class CreateRandomStoreFile {

  /**
   * Up to this many bytes can be added to or subtracted from key/value lengths.
   */
  private static final int LEN_VARIATION = 5;

  private static final Logger LOG =
      LoggerFactory.getLogger(CreateRandomStoreFile.class);
private static final String OUTPUT_DIR_OPTION = "o";
private static final String NUM_KV_OPTION = "n";
private static final String HFILE_VERSION_OPTION = "h";
private static final String KEY_SIZE_OPTION = "k";
private static final String VALUE_SIZE_OPTION = "v";
private static final String COMPRESSION_OPTION = "c";
private static final String BLOOM_FILTER_OPTION = "bf";
private static final String BLOCK_SIZE_OPTION = "bs";
private static final String BLOOM_BLOCK_SIZE_OPTION = "bfbs";
private static final String INDEX_BLOCK_SIZE_OPTION = "ibs"; /** The exit code this command-line tool returns on failure */
private static final int EXIT_FAILURE = 1; /** The number of valid key types in a store file */
private static final int NUM_VALID_KEY_TYPES =
KeyValue.Type.values().length - 2; private Options options = new Options(); private int keyPrefixLen, keyLen, rowLen, cfLen, valueLen;
private Random rand; /**
* Runs the tools.
*
* @param args command-line arguments
* @return true in case of success
* @throws IOException
*/
  public boolean run(String[] args) throws IOException {
    options.addOption(OUTPUT_DIR_OPTION, "output_dir", true,
        "Output directory");
    options.addOption(NUM_KV_OPTION, "num_kv", true,
        "Number of key/value pairs");
    options.addOption(KEY_SIZE_OPTION, "key_size", true, "Average key size");
    options.addOption(VALUE_SIZE_OPTION, "value_size", true,
        "Average value size");
    options.addOption(HFILE_VERSION_OPTION, "hfile_version", true,
        "HFile version to create");
    options.addOption(COMPRESSION_OPTION, "compression", true,
        " Compression type, one of "
            + Arrays.toString(Compression.Algorithm.values()));
    options.addOption(BLOOM_FILTER_OPTION, "bloom_filter", true,
        "Bloom filter type, one of "
            + Arrays.toString(BloomType.values()));
    options.addOption(BLOCK_SIZE_OPTION, "block_size", true,
        "HFile block size");
    options.addOption(BLOOM_BLOCK_SIZE_OPTION, "bloom_block_size", true,
        "Compound Bloom filters block size");
    options.addOption(INDEX_BLOCK_SIZE_OPTION, "index_block_size", true,
        "Index block size");

    if (args.length == 0) {
      HelpFormatter formatter = new HelpFormatter();
      formatter.printHelp(CreateRandomStoreFile.class.getSimpleName(), options,
          true);
      return false;
    }
    CommandLineParser parser = new PosixParser();
    CommandLine cmdLine;
    try {
      cmdLine = parser.parse(options, args);
    } catch (ParseException ex) {
      LOG.error(ex.toString(), ex);
      return false;
    }

    if (!cmdLine.hasOption(OUTPUT_DIR_OPTION)) {
      LOG.error("Output directory is not specified");
      return false;
    }

    if (!cmdLine.hasOption(NUM_KV_OPTION)) {
      LOG.error("The number of keys/values not specified");
      return false;
    }

    if (!cmdLine.hasOption(KEY_SIZE_OPTION)) {
      LOG.error("Key size is not specified");
      return false;
    }

    if (!cmdLine.hasOption(VALUE_SIZE_OPTION)) {
      LOG.error("Value size not specified");
      return false;
    }

    Configuration conf = HBaseConfiguration.create();

    Path outputDir = new Path(cmdLine.getOptionValue(OUTPUT_DIR_OPTION));

    long numKV = Long.parseLong(cmdLine.getOptionValue(NUM_KV_OPTION));
    configureKeyValue(numKV,
        Integer.parseInt(cmdLine.getOptionValue(KEY_SIZE_OPTION)),
        Integer.parseInt(cmdLine.getOptionValue(VALUE_SIZE_OPTION)));

    FileSystem fs = FileSystem.get(conf);

    Compression.Algorithm compr = Compression.Algorithm.NONE;
    if (cmdLine.hasOption(COMPRESSION_OPTION)) {
      compr = Compression.Algorithm.valueOf(
          cmdLine.getOptionValue(COMPRESSION_OPTION));
    }

    BloomType bloomType = BloomType.NONE;
    if (cmdLine.hasOption(BLOOM_FILTER_OPTION)) {
      bloomType = BloomType.valueOf(cmdLine.getOptionValue(
          BLOOM_FILTER_OPTION));
    }

    int blockSize = HConstants.DEFAULT_BLOCKSIZE;
    if (cmdLine.hasOption(BLOCK_SIZE_OPTION)) {
      blockSize = Integer.valueOf(cmdLine.getOptionValue(BLOCK_SIZE_OPTION));
    }

    if (cmdLine.hasOption(BLOOM_BLOCK_SIZE_OPTION)) {
      conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE,
          Integer.valueOf(cmdLine.getOptionValue(BLOOM_BLOCK_SIZE_OPTION)));
    }

    if (cmdLine.hasOption(INDEX_BLOCK_SIZE_OPTION)) {
      conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY,
          Integer.valueOf(cmdLine.getOptionValue(INDEX_BLOCK_SIZE_OPTION)));
    }

    HFileContext meta = new HFileContextBuilder().withCompression(compr)
        .withBlockSize(blockSize).build();
    StoreFileWriter sfw = new StoreFileWriter.Builder(conf,
        new CacheConfig(conf), fs)
        .withOutputDir(outputDir)
        .withBloomType(bloomType)
        .withMaxKeyCount(numKV)
        .withFileContext(meta)
        .build();

    rand = new Random();
    LOG.info("Writing " + numKV + " key/value pairs");
    for (long i = 0; i < numKV; ++i) {
      sfw.append(generateKeyValue(i));
    }

    int numMetaBlocks = rand.nextInt(10) + 1;
    LOG.info("Writing " + numMetaBlocks + " meta blocks");
    for (int metaI = 0; metaI < numMetaBlocks; ++metaI) {
      sfw.getHFileWriter().appendMetaBlock(generateString(),
          new BytesWritable(generateValue()));
    }
    sfw.close();

    Path storeFilePath = sfw.getPath();
    long fileSize = fs.getFileStatus(storeFilePath).getLen();
    LOG.info("Created {}, {} bytes, compression={}", storeFilePath, fileSize,
        compr.toString());

    return true;
  }

  private void configureKeyValue(long numKV, int keyLen, int valueLen) {
    numKV = Math.abs(numKV);
    keyLen = Math.abs(keyLen);

    // Use as many prefix bytes as are needed to encode numKV;
    // generateKey() writes the record index into this prefix.
    keyPrefixLen = 0;
    while (numKV != 0) {
      numKV >>>= 8;
      ++keyPrefixLen;
    }

    this.keyLen = Math.max(keyPrefixLen, keyLen);
    this.valueLen = valueLen;

    // Arbitrarily split the key into row, column family, and qualifier.
    rowLen = keyPrefixLen / 3;
    cfLen = keyPrefixLen / 4;
  }

  private int nextInRange(int range) {
    return rand.nextInt(2 * range + 1) - range;
  }

  public KeyValue generateKeyValue(long i) {
    byte[] k = generateKey(i);
    byte[] v = generateValue();

    return new KeyValue(
        k, 0, rowLen,
        k, rowLen, cfLen,
        k, rowLen + cfLen, k.length - rowLen - cfLen,
        rand.nextLong(),
        generateKeyType(rand),
        v, 0, v.length);
  }

  public static KeyValue.Type generateKeyType(Random rand) {
    if (rand.nextBoolean()) {
      // Let's make half of KVs puts.
      return KeyValue.Type.Put;
    } else {
      KeyValue.Type keyType =
          KeyValue.Type.values()[1 + rand.nextInt(NUM_VALID_KEY_TYPES)];
      if (keyType == KeyValue.Type.Minimum || keyType == KeyValue.Type.Maximum) {
        throw new RuntimeException("Generated an invalid key type: " + keyType
            + ". Probably the layout of KeyValue.Type has changed.");
      }
      return keyType;
    }
  }

  private String generateString() {
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < rand.nextInt(10); ++i) {
      sb.append((char) ('A' + rand.nextInt(26)));
    }
    return sb.toString();
  }

  private byte[] generateKey(long i) {
    byte[] k = new byte[Math.max(keyPrefixLen, keyLen
        + nextInRange(LEN_VARIATION))];
    for (int pos = keyPrefixLen - 1; pos >= 0; --pos) {
      k[pos] = (byte) (i & 0xFF);
      i >>>= 8;
    }
    for (int pos = keyPrefixLen; pos < k.length; ++pos) {
      k[pos] = (byte) rand.nextInt(256);
    }
    return k;
  }

  private byte[] generateValue() {
    byte[] v = new byte[Math.max(1, valueLen + nextInRange(LEN_VARIATION))];
    for (int i = 0; i < v.length; ++i) {
      v[i] = (byte) rand.nextInt(256);
    }
    return v;
  }

  public static void main(String[] args) {
    CreateRandomStoreFile app = new CreateRandomStoreFile();
    try {
      if (!app.run(args)) {
        System.exit(EXIT_FAILURE);
      }
    } catch (IOException ex) {
      LOG.error(ex.toString(), ex);
      System.exit(EXIT_FAILURE);
    }
  }
}
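The class is a small command-line tool, so the easiest way to get a feel for it is to run it. Below is a minimal usage sketch, not part of the original source: it assumes the class (which lives in HBase's test sources) is on the classpath, and the output directory, counts, and sizes are arbitrary example values. The required flags are -o (output directory), -n (number of key/value pairs), -k (average key size) and -v (average value size); compression, Bloom filter type and the block-size options are optional.

import org.apache.hadoop.hbase.regionserver.CreateRandomStoreFile;

public class CreateRandomStoreFileDemo {
  public static void main(String[] args) throws Exception {
    // Example arguments only; /tmp/random-storefile is a hypothetical path.
    String[] toolArgs = {
        "-o", "/tmp/random-storefile",   // output directory
        "-n", "100000",                  // number of key/value pairs
        "-k", "20",                      // average key size in bytes
        "-v", "100",                     // average value size in bytes
        "-c", "NONE",                    // a Compression.Algorithm name
        "-bf", "ROW"                     // a BloomType name
    };
    boolean ok = new CreateRandomStoreFile().run(toolArgs);
    System.exit(ok ? 0 : 1);
  }
}

Since the hbase launcher script can also run an arbitrary class name, the same flags can be passed directly on the command line, provided the test classes are on the classpath.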
