/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nutch.segment;

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.PrintStream;
import java.io.PrintWriter;
import java.io.Writer;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.MapFile;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.UTF8;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapFileOutputFormat;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.RecordWriter;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.SequenceFileInputFormat;
import org.apache.hadoop.mapred.SequenceFileOutputFormat;
import org.apache.hadoop.util.Progressable;
import org.apache.nutch.crawl.CrawlDatum;
import org.apache.nutch.crawl.NutchWritable;
import org.apache.nutch.parse.ParseData;
import org.apache.nutch.parse.ParseText;
import org.apache.nutch.protocol.Content;
import org.apache.nutch.util.HadoopFSUtil;
import org.apache.nutch.util.LogUtil;
import org.apache.nutch.util.NutchConfiguration;
import org.apache.nutch.util.NutchJob;

/** Dump the content of a segment. */
public class SegmentReader extends Configured implements
    Reducer<Text, NutchWritable, Text, Text> {

  public static final Log LOG = LogFactory.getLog(SegmentReader.class);

  long recNo = 0L;

  private boolean co, fe, ge, pa, pd, pt;
  private FileSystem fs;

  public static class InputCompatMapper extends MapReduceBase implements
      Mapper<WritableComparable, Writable, Text, NutchWritable> {

    private Text newKey = new Text();

    public void map(WritableComparable key, Writable value,
        OutputCollector<Text, NutchWritable> collector, Reporter reporter) throws IOException {
// convert on the fly from old formats with UTF8 keys
if (key instanceof UTF8) {
newKey.set(key.toString());
key = newKey;
}
collector.collect((Text)key, new NutchWritable(value));
    }
  }

  /** Implements a text output format */
public static class TextOutputFormat extends
FileOutputFormat<WritableComparable, Writable> {
public RecordWriter<WritableComparable, Writable> getRecordWriter(
final FileSystem fs, JobConf job,
        String name, final Progressable progress) throws IOException {

      final Path segmentDumpFile = new Path(FileOutputFormat.getOutputPath(job), name);

      // Get the old copy out of the way
      if (fs.exists(segmentDumpFile)) fs.delete(segmentDumpFile, true);

      final PrintStream printStream = new PrintStream(fs.create(segmentDumpFile));
return new RecordWriter<WritableComparable, Writable>() {
public synchronized void write(WritableComparable key, Writable value) throws IOException {
printStream.println(value);
        }

        public synchronized void close(Reporter reporter) throws IOException {
printStream.close();
}
};
}
  }

  public SegmentReader() {
super(null);
  }

  public SegmentReader(Configuration conf, boolean co, boolean fe, boolean ge, boolean pa,
boolean pd, boolean pt) {
super(conf);
this.co = co;
this.fe = fe;
this.ge = ge;
this.pa = pa;
this.pd = pd;
this.pt = pt;
try {
this.fs = FileSystem.get(getConf());
} catch (IOException e) {
e.printStackTrace(LogUtil.getWarnStream(LOG));
}
  }

  public void configure(JobConf job) {
setConf(job);
this.co = getConf().getBoolean("segment.reader.co", true);
this.fe = getConf().getBoolean("segment.reader.fe", true);
this.ge = getConf().getBoolean("segment.reader.ge", true);
this.pa = getConf().getBoolean("segment.reader.pa", true);
this.pd = getConf().getBoolean("segment.reader.pd", true);
this.pt = getConf().getBoolean("segment.reader.pt", true);
try {
this.fs = FileSystem.get(getConf());
} catch (IOException e) {
e.printStackTrace(LogUtil.getWarnStream(LOG));
}
  }

  private JobConf createJobConf() {
JobConf job = new NutchJob(getConf());
job.setBoolean("segment.reader.co", this.co);
job.setBoolean("segment.reader.fe", this.fe);
job.setBoolean("segment.reader.ge", this.ge);
job.setBoolean("segment.reader.pa", this.pa);
job.setBoolean("segment.reader.pd", this.pd);
job.setBoolean("segment.reader.pt", this.pt);
return job;
  }

  public void close() {}

  public void reduce(Text key, Iterator<NutchWritable> values,
OutputCollector<Text, Text> output, Reporter reporter)
throws IOException {
    StringBuffer dump = new StringBuffer();
    dump.append("\nRecno:: ").append(recNo++).append("\n");
    dump.append("URL:: " + key.toString() + "\n");
while (values.hasNext()) {
Writable value = values.next().get(); // unwrap
if (value instanceof CrawlDatum) {
dump.append("\nCrawlDatum::\n").append(((CrawlDatum) value).toString());
} else if (value instanceof Content) {
        Content c = (Content) value;
        String encoding = c.getMetadata().get("charset");
        if (encoding == null) {
          // No charset in the fetch metadata: fall back to GBK.
          LOG.info("charset metadata is missing, using default encoding GBK");
          encoding = "GBK";
        }
        dump.append("\nContent::\n").append(c.toString(encoding));
} else if (value instanceof ParseData) {
dump.append("\nParseData::\n").append(((ParseData) value).toString());
} else if (value instanceof ParseText) {
dump.append("\nParseText::\n").append(((ParseText) value).toString());
} else if (LOG.isWarnEnabled()) {
LOG.warn("Unrecognized type: " + value.getClass());
}
}
output.collect(key, new Text(dump.toString()));
  }

  public void dump(Path segment, Path output) throws IOException {
    /* Batch dump -- 2012-12-03 */
    /* Change #2 start: the per-segment info log below is commented out because
     * the segment directory paths are too long.
     * if (LOG.isInfoEnabled()) {
     *   LOG.info("SegmentReader: dump segment: " + segment);
     * }
     */
    /* Change #2 end */
    JobConf job = createJobConf();
    job.setJobName("read " + segment);

    if (ge) FileInputFormat.addInputPath(job, new Path(segment, CrawlDatum.GENERATE_DIR_NAME));
if (fe) FileInputFormat.addInputPath(job, new Path(segment, CrawlDatum.FETCH_DIR_NAME));
if (pa) FileInputFormat.addInputPath(job, new Path(segment, CrawlDatum.PARSE_DIR_NAME));
if (co) FileInputFormat.addInputPath(job, new Path(segment, Content.DIR_NAME));
if (pd) FileInputFormat.addInputPath(job, new Path(segment, ParseData.DIR_NAME));
    if (pt) FileInputFormat.addInputPath(job, new Path(segment, ParseText.DIR_NAME));

    job.setInputFormat(SequenceFileInputFormat.class);
job.setMapperClass(InputCompatMapper.class);
    job.setReducerClass(SegmentReader.class);

    Path tempDir = new Path(job.get("hadoop.tmp.dir", "/tmp") + "/segread-" + new java.util.Random().nextInt());
    fs.delete(tempDir, true);

    FileOutputFormat.setOutputPath(job, tempDir);
job.setOutputFormat(TextOutputFormat.class);
job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(NutchWritable.class);

    JobClient.runJob(job);

    // concatenate the output
    Path dumpFile = new Path(output, job.get("segment.dump.dir", "dump"));

    // remove the old file
fs.delete(dumpFile, true);
FileStatus[] fstats = fs.listStatus(tempDir, HadoopFSUtil.getPassAllFilter());
    Path[] files = HadoopFSUtil.getPaths(fstats);

    PrintWriter writer = null;
int currentRecordNumber = 0;
if (files.length > 0) {
writer = new PrintWriter(new BufferedWriter(new OutputStreamWriter(fs.create(dumpFile),"UTF-8")));
try {
for (int i = 0; i < files.length; i++) {
Path partFile = (Path) files[i];
try {
currentRecordNumber = append(fs, job, partFile, writer, currentRecordNumber);
} catch (IOException exception) {
if (LOG.isWarnEnabled()) {
LOG.warn("Couldn't copy the content of " + partFile.toString() +
" into " + dumpFile.toString());
LOG.warn(exception.getMessage());
}
}
}
} finally {
writer.close();
}
}
    fs.delete(tempDir, true);
if (LOG.isInfoEnabled()) { LOG.info("SegmentReader: done"); }
  }

  /** Appends two files and updates the Recno counter */
private int append(FileSystem fs, Configuration conf, Path src, PrintWriter writer, int currentRecordNumber)
throws IOException {
BufferedReader reader = new BufferedReader(new InputStreamReader(fs.open(src)));
try {
String line = reader.readLine();
while (line != null) {
if (line.startsWith("Recno:: ")) {
line = "Recno:: " + currentRecordNumber++;
}
writer.println(line);
line = reader.readLine();
}
return currentRecordNumber;
} finally {
reader.close();
}
  }

  private static final String[][] keys = new String[][] {
{"co", "Content::\n"},
{"ge", "Crawl Generate::\n"},
{"fe", "Crawl Fetch::\n"},
{"pa", "Crawl Parse::\n"},
{"pd", "ParseData::\n"},
{"pt", "ParseText::\n"}
  };

  public void get(final Path segment, final Text key, Writer writer,
final Map<String, List<Writable>> results) throws Exception {
LOG.info("SegmentReader: get '" + key + "'");
ArrayList<Thread> threads = new ArrayList<Thread>();
if (co) threads.add(new Thread() {
public void run() {
try {
List<Writable> res = getMapRecords(new Path(segment, Content.DIR_NAME), key);
results.put("co", res);
} catch (Exception e) {
e.printStackTrace(LogUtil.getWarnStream(LOG));
}
}
});
if (fe) threads.add(new Thread() {
public void run() {
try {
List<Writable> res = getMapRecords(new Path(segment, CrawlDatum.FETCH_DIR_NAME), key);
results.put("fe", res);
} catch (Exception e) {
e.printStackTrace(LogUtil.getWarnStream(LOG));
}
}
});
if (ge) threads.add(new Thread() {
public void run() {
try {
List<Writable> res = getSeqRecords(new Path(segment, CrawlDatum.GENERATE_DIR_NAME), key);
results.put("ge", res);
} catch (Exception e) {
e.printStackTrace(LogUtil.getWarnStream(LOG));
}
}
});
if (pa) threads.add(new Thread() {
public void run() {
try {
List<Writable> res = getSeqRecords(new Path(segment, CrawlDatum.PARSE_DIR_NAME), key);
results.put("pa", res);
} catch (Exception e) {
e.printStackTrace(LogUtil.getWarnStream(LOG));
}
}
});
if (pd) threads.add(new Thread() {
public void run() {
try {
List<Writable> res = getMapRecords(new Path(segment, ParseData.DIR_NAME), key);
results.put("pd", res);
} catch (Exception e) {
e.printStackTrace(LogUtil.getWarnStream(LOG));
}
}
});
if (pt) threads.add(new Thread() {
public void run() {
try {
List<Writable> res = getMapRecords(new Path(segment, ParseText.DIR_NAME), key);
results.put("pt", res);
} catch (Exception e) {
e.printStackTrace(LogUtil.getWarnStream(LOG));
}
}
});
Iterator<Thread> it = threads.iterator();
while (it.hasNext()) it.next().start();
int cnt;
do {
cnt = 0;
      try {
        Thread.sleep(5000);
      } catch (Exception e) {
        // ignore and re-check how many retrieval threads are still alive
      }
it = threads.iterator();
while (it.hasNext()) {
if (it.next().isAlive()) cnt++;
}
if ((cnt > 0) && (LOG.isDebugEnabled())) {
LOG.debug("(" + cnt + " to retrieve)");
}
} while (cnt > 0);
for (int i = 0; i < keys.length; i++) {
List<Writable> res = results.get(keys[i][0]);
if (res != null && res.size() > 0) {
for (int k = 0; k < res.size(); k++) {
writer.write(keys[i][1]);
writer.write(res.get(k) + "\n");
}
}
writer.flush();
}
  }

  private List<Writable> getMapRecords(Path dir, Text key) throws Exception {
MapFile.Reader[] readers = MapFileOutputFormat.getReaders(fs, dir, getConf());
ArrayList<Writable> res = new ArrayList<Writable>();
Class keyClass = readers[0].getKeyClass();
Class valueClass = readers[0].getValueClass();
if (!keyClass.getName().equals("org.apache.hadoop.io.Text"))
throw new IOException("Incompatible key (" + keyClass.getName() + ")");
Writable value = (Writable)valueClass.newInstance();
// we don't know the partitioning schema
for (int i = 0; i < readers.length; i++) {
if (readers[i].get(key, value) != null)
res.add(value);
readers[i].close();
}
return res;
  }

  private List<Writable> getSeqRecords(Path dir, Text key) throws Exception {
SequenceFile.Reader[] readers = SequenceFileOutputFormat.getReaders(getConf(), dir);
ArrayList<Writable> res = new ArrayList<Writable>();
Class keyClass = readers[0].getKeyClass();
Class valueClass = readers[0].getValueClass();
if (!keyClass.getName().equals("org.apache.hadoop.io.Text"))
throw new IOException("Incompatible key (" + keyClass.getName() + ")");
Writable aKey = (Writable)keyClass.newInstance();
Writable value = (Writable)valueClass.newInstance();
for (int i = 0; i < readers.length; i++) {
while (readers[i].next(aKey, value)) {
if (aKey.equals(key))
res.add(value);
}
readers[i].close();
}
return res;
  }

  public static class SegmentReaderStats {
public long start = -1L;
public long end = -1L;
public long generated = -1L;
public long fetched = -1L;
public long fetchErrors = -1L;
public long parsed = -1L;
public long parseErrors = -1L;
  }

  SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss");

  public void list(List<Path> dirs, Writer writer) throws Exception {
writer.write("NAME\t\tGENERATED\tFETCHER START\t\tFETCHER END\t\tFETCHED\tPARSED\n");
for (int i = 0; i < dirs.size(); i++) {
Path dir = dirs.get(i);
SegmentReaderStats stats = new SegmentReaderStats();
getStats(dir, stats);
writer.write(dir.getName() + "\t");
if (stats.generated == -1) writer.write("?");
else writer.write(stats.generated + "");
writer.write("\t\t");
if (stats.start == -1) writer.write("?\t");
else writer.write(sdf.format(new Date(stats.start)));
writer.write("\t");
if (stats.end == -1) writer.write("?");
else writer.write(sdf.format(new Date(stats.end)));
writer.write("\t");
if (stats.fetched == -1) writer.write("?");
else writer.write(stats.fetched + "");
writer.write("\t");
if (stats.parsed == -1) writer.write("?");
else writer.write(stats.parsed + "");
writer.write("\n");
writer.flush();
}
  }

  public void getStats(Path segment, final SegmentReaderStats stats) throws Exception {
SequenceFile.Reader[] readers = SequenceFileOutputFormat.getReaders(getConf(), new Path(segment, CrawlDatum.GENERATE_DIR_NAME));
long cnt = 0L;
Text key = new Text();
for (int i = 0; i < readers.length; i++) {
while (readers[i].next(key)) cnt++;
readers[i].close();
}
stats.generated = cnt;
Path fetchDir = new Path(segment, CrawlDatum.FETCH_DIR_NAME);
if (fs.exists(fetchDir) && fs.getFileStatus(fetchDir).isDir()) {
cnt = 0L;
long start = Long.MAX_VALUE;
long end = Long.MIN_VALUE;
CrawlDatum value = new CrawlDatum();
MapFile.Reader[] mreaders = MapFileOutputFormat.getReaders(fs, fetchDir, getConf());
for (int i = 0; i < mreaders.length; i++) {
while (mreaders[i].next(key, value)) {
cnt++;
if (value.getFetchTime() < start) start = value.getFetchTime();
if (value.getFetchTime() > end) end = value.getFetchTime();
}
mreaders[i].close();
}
stats.start = start;
stats.end = end;
stats.fetched = cnt;
}
Path parseDir = new Path(segment, ParseData.DIR_NAME);
    if (fs.exists(parseDir) && fs.getFileStatus(parseDir).isDir()) {
cnt = 0L;
long errors = 0L;
ParseData value = new ParseData();
MapFile.Reader[] mreaders = MapFileOutputFormat.getReaders(fs, parseDir, getConf());
for (int i = 0; i < mreaders.length; i++) {
while (mreaders[i].next(key, value)) {
cnt++;
if (!value.getStatus().isSuccess()) errors++;
}
mreaders[i].close();
}
stats.parsed = cnt;
stats.parseErrors = errors;
}
  }

  private static final int MODE_DUMP = 0;

  private static final int MODE_LIST = 1;

  private static final int MODE_GET = 2;

  public static void main(String[] args) throws Exception {
if (args.length < 2) {
usage();
return;
}
int mode = -1;
if (args[0].equals("-dump"))
mode = MODE_DUMP;
else if (args[0].equals("-list"))
mode = MODE_LIST;
    else if (args[0].equals("-get")) mode = MODE_GET;

    boolean co = true;
boolean fe = true;
boolean ge = true;
boolean pa = true;
boolean pd = true;
boolean pt = true;
// collect general options
for (int i = 1; i < args.length; i++) {
if (args[i].equals("-nocontent")) {
co = false;
args[i] = null;
} else if (args[i].equals("-nofetch")) {
fe = false;
args[i] = null;
} else if (args[i].equals("-nogenerate")) {
ge = false;
args[i] = null;
} else if (args[i].equals("-noparse")) {
pa = false;
args[i] = null;
} else if (args[i].equals("-noparsedata")) {
pd = false;
args[i] = null;
} else if (args[i].equals("-noparsetext")) {
pt = false;
args[i] = null;
}
}
Configuration conf = NutchConfiguration.create();
final FileSystem fs = FileSystem.get(conf);
SegmentReader segmentReader = new SegmentReader(conf, co, fe, ge, pa, pd, pt);
// collect required args
switch (mode) {
case MODE_DUMP:
String input = args[1];
if (input == null) {
System.err.println("Missing required argument: <segment_dir>");
usage();
return;
}
String output = args.length > 2 ? args[2] : null;
if (output == null) {
System.err.println("Missing required argument: <output>");
usage();
return;
      }

      /* Batch dump -- 2012-12-03 */
      /* Change #1 start: treat <segment_dir> as a directory of segments and dump
       * each child segment into a same-named sub-directory of <output>. */
      File fInput = new File(input);
      File[] filesSeg = fInput.listFiles();
      for (File inseg : filesSeg) {
        String seg = inseg.getName();
        LOG.info(seg);
        File ouseg = new File(output, seg);
        if (!ouseg.exists()) {
          ouseg.mkdir();
        }
        segmentReader.dump(new Path(inseg.getAbsolutePath()), new Path(ouseg.getAbsolutePath()));
      }
      // segmentReader.dump(new Path(input), new Path(output)); // original single-segment code
      /* Change #1 end */
      return;
case MODE_LIST:
ArrayList<Path> dirs = new ArrayList<Path>();
for (int i = 1; i < args.length; i++) {
if (args[i] == null) continue;
if (args[i].equals("-dir")) {
Path dir = new Path(args[++i]);
FileStatus[] fstats = fs.listStatus(dir, HadoopFSUtil.getPassDirectoriesFilter(fs));
Path[] files = HadoopFSUtil.getPaths(fstats);
if (files != null && files.length > 0) {
dirs.addAll(Arrays.asList(files));
}
} else dirs.add(new Path(args[i]));
}
segmentReader.list(dirs, new OutputStreamWriter(System.out, "UTF-8"));
return;
case MODE_GET:
input = args[1];
if (input == null) {
System.err.println("Missing required argument: <segment_dir>");
usage();
return;
}
String key = args.length > 2 ? args[2] : null;
if (key == null) {
System.err.println("Missing required argument: <keyValue>");
usage();
return;
}
segmentReader.get(new Path(input), new Text(key), new OutputStreamWriter(System.out, "UTF-8"), new HashMap<String, List<Writable>>());
return;
default:
System.err.println("Invalid operation: " + args[0]);
usage();
return;
}
  }

  private static void usage() {
System.err.println("Usage: SegmentReader (-dump ... | -list ... | -get ...) [general options]\n");
System.err.println("* General options:");
System.err.println("\t-nocontent\tignore content directory");
System.err.println("\t-nofetch\tignore crawl_fetch directory");
System.err.println("\t-nogenerate\tignore crawl_generate directory");
System.err.println("\t-noparse\tignore crawl_parse directory");
System.err.println("\t-noparsedata\tignore parse_data directory");
System.err.println("\t-noparsetext\tignore parse_text directory");
System.err.println();
System.err.println("* SegmentReader -dump <segment_dir> <output> [general options]");
System.err.println(" Dumps content of a <segment_dir> as a text file to <output>.\n");
System.err.println("\t<segment_dir>\tname of the segment directory.");
System.err.println("\t<output>\tname of the (non-existent) output directory.");
System.err.println();
System.err.println("* SegmentReader -list (<segment_dir1> ... | -dir <segments>) [general options]");
System.err.println(" List a synopsis of segments in specified directories, or all segments in");
System.err.println(" a directory <segments>, and print it on System.out\n");
System.err.println("\t<segment_dir1> ...\tlist of segment directories to process");
System.err.println("\t-dir <segments>\t\tdirectory that contains multiple segments");
System.err.println();
System.err.println("* SegmentReader -get <segment_dir> <keyValue> [general options]");
System.err.println(" Get a specified record from a segment, and print it on System.out.\n");
System.err.println("\t<segment_dir>\tname of the segment directory.");
System.err.println("\t<keyValue>\tvalue of the key (url).");
System.err.println("\t\tNote: put double-quotes around strings with spaces.");
}
}
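
A minimal sketch of driving the batch dump programmatically rather than through main(); the paths and segment name below are hypothetical placeholders and should be adjusted to the actual crawl layout:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.nutch.segment.SegmentReader;
import org.apache.nutch.util.NutchConfiguration;

public class BatchDumpExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = NutchConfiguration.create();
    // Enable all six data types (content, fetch, generate, parse, parse_data, parse_text).
    SegmentReader reader = new SegmentReader(conf, true, true, true, true, true, true);
    // Dump one segment the same way the batch loop in main() does for each child directory;
    // "crawl/segments/20121203000000" and "dump/20121203000000" are placeholder paths.
    reader.dump(new Path("crawl/segments/20121203000000"),
        new Path("dump/20121203000000"));
  }
}

Note that the batch loop in main() lists segments with java.io.File, so it assumes the segments live on the local filesystem; listing them through FileSystem.listStatus would be needed to batch-dump segments stored on HDFS.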
