Injector (org.apache.nutch.crawl.Injector):

  • Input: the directory containing the seed (URL list) files
  • Output: the crawldb (the database that stores each URL together with its crawl state)
  • Purpose: inject the seed URLs into the crawldb
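
Before diving into the code, a quick look at the input format. A seed file is plain text with one URL per line; the URL may be followed by tab-separated name=value pairs that become per-URL metadata (the reserved keys are listed in the class Javadoc below). An illustrative seed file, with tabs shown as \t and userType as an arbitrary custom key:

  http://www.nutch.org/ \t nutch.score=10 \t nutch.fetchInterval=2592000 \t userType=open_source
  http://www.example.com/

Empty lines and lines starting with # are skipped by the mapper. From the command line the tool is normally invoked as bin/nutch inject <crawldb> <url_dir>, which lands in the main()/run() methods at the bottom of the class.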

package org.apache.nutch.crawl;

import java.io.*;
import java.text.SimpleDateFormat;
import java.util.*;

// slf4j logging imports
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.io.*;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.util.*;

import org.apache.nutch.net.*;
import org.apache.nutch.metadata.Nutch;
import org.apache.nutch.scoring.ScoringFilterException;
import org.apache.nutch.scoring.ScoringFilters;
import org.apache.nutch.util.NutchConfiguration;
import org.apache.nutch.util.NutchJob;
import org.apache.nutch.util.TimingUtil;

/**
 * This class takes a flat file of URLs and adds them to the list of pages to
 * be crawled. Useful for bootstrapping the system. The URL files contain one
 * URL per line, optionally followed by custom metadata separated by tabs,
 * with the metadata key separated from the corresponding value by '='. <br>
 * Note that some metadata keys are reserved: <br>
 * - <i>nutch.score</i> : allows setting a custom score for a specific URL <br>
 * - <i>nutch.fetchInterval</i> : allows setting a custom fetch interval for a
 * specific URL <br>
 * - <i>nutch.fetchInterval.fixed</i> : allows setting a custom fetch interval
 * for a specific URL that is not changed by AdaptiveFetchSchedule <br>
 * e.g. http://www.nutch.org/ \t nutch.score=10 \t nutch.fetchInterval=2592000
 * \t userType=open_source
 **/
public class Injector extends Configured implements Tool {
  public static final Logger LOG = LoggerFactory.getLogger(Injector.class);

  /** metadata key reserved for setting a custom score for a specific URL */
  public static String nutchScoreMDName = "nutch.score";
  /**
   * metadata key reserved for setting a custom fetchInterval for a specific URL
   */
  public static String nutchFetchIntervalMDName = "nutch.fetchInterval";
  /**
   * metadata key reserved for setting a fixed custom fetchInterval for a
   * specific URL
   */
  public static String nutchFixedFetchIntervalMDName = "nutch.fetchInterval.fixed";

  /** Normalize and filter injected urls. */
  public static class InjectMapper implements
      Mapper<WritableComparable<?>, Text, Text, CrawlDatum> {
    private URLNormalizers urlNormalizers;
    private int interval;
    private float scoreInjected;
    private JobConf jobConf;
    private URLFilters filters;
    private ScoringFilters scfilters;
    private long curTime;

    public void configure(JobConf job) {
      this.jobConf = job;
      urlNormalizers = new URLNormalizers(job, URLNormalizers.SCOPE_INJECT);
      interval = jobConf.getInt("db.fetch.interval.default", 2592000);
      filters = new URLFilters(jobConf);
      scfilters = new ScoringFilters(jobConf);
      scoreInjected = jobConf.getFloat("db.score.injected", 1.0f);
      curTime = job
          .getLong("injector.current.time", System.currentTimeMillis());
    }

    public void close() {
    }

    public void map(WritableComparable<?> key, Text value,
        OutputCollector<Text, CrawlDatum> output, Reporter reporter)
        throws IOException {
      String url = value.toString().trim(); // value is line of text

      if (url != null && (url.length() == 0 || url.startsWith("#"))) {
        /* Ignore empty lines and lines that start with # */
        return;
      }

      // if the line contains tabs, the remaining columns are metadata that
      // must be name=value pairs separated by \t
      float customScore = -1f;
      int customInterval = interval;
      int fixedInterval = -1;
      Map<String, String> metadata = new TreeMap<String, String>();
      if (url.indexOf("\t") != -1) {
        String[] splits = url.split("\t");
        url = splits[0];
        for (int s = 1; s < splits.length; s++) {
          // find separation between name and value
          int indexEquals = splits[s].indexOf("=");
          if (indexEquals == -1) {
            // skip anything without a =
            continue;
          }
          String metaname = splits[s].substring(0, indexEquals);
          String metavalue = splits[s].substring(indexEquals + 1);
          if (metaname.equals(nutchScoreMDName)) {
            try {
              customScore = Float.parseFloat(metavalue);
            } catch (NumberFormatException nfe) {
            }
          } else if (metaname.equals(nutchFetchIntervalMDName)) {
            try {
              customInterval = Integer.parseInt(metavalue);
            } catch (NumberFormatException nfe) {
            }
          } else if (metaname.equals(nutchFixedFetchIntervalMDName)) {
            try {
              fixedInterval = Integer.parseInt(metavalue);
            } catch (NumberFormatException nfe) {
            }
          } else
            metadata.put(metaname, metavalue);
        }
      }
      try {
        url = urlNormalizers.normalize(url, URLNormalizers.SCOPE_INJECT);
        url = filters.filter(url); // filter the url
      } catch (Exception e) {
        if (LOG.isWarnEnabled()) {
          LOG.warn("Skipping " + url + ":" + e);
        }
        url = null;
      }
      if (url == null) {
        reporter.getCounter("injector", "urls_filtered").increment(1);
      } else { // if it passes
        value.set(url); // collect it
        CrawlDatum datum = new CrawlDatum();
        datum.setStatus(CrawlDatum.STATUS_INJECTED);

        // Is the interval custom? Then set it as metadata
        if (fixedInterval > -1) {
          // Set writable using float. Float is used by
          // AdaptiveFetchSchedule
          datum.getMetaData().put(Nutch.WRITABLE_FIXED_INTERVAL_KEY,
              new FloatWritable(fixedInterval));
          datum.setFetchInterval(fixedInterval);
        } else {
          datum.setFetchInterval(customInterval);
        }

        datum.setFetchTime(curTime);
        // now add the metadata
        Iterator<String> keysIter = metadata.keySet().iterator();
        while (keysIter.hasNext()) {
          String keymd = keysIter.next();
          String valuemd = metadata.get(keymd);
          datum.getMetaData().put(new Text(keymd), new Text(valuemd));
        }
        if (customScore != -1)
          datum.setScore(customScore);
        else
          datum.setScore(scoreInjected);
        try {
          scfilters.injectedScore(value, datum);
        } catch (ScoringFilterException e) {
          if (LOG.isWarnEnabled()) {
            LOG.warn("Cannot filter injected score for url " + url
                + ", using default (" + e.getMessage() + ")");
          }
        }
        reporter.getCounter("injector", "urls_injected").increment(1);
        output.collect(value, datum);
      }
    }
  }
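
  // Summary of InjectMapper: each seed line is parsed for optional
  // tab-separated name=value metadata, the URL is normalized with the
  // "inject" scope and passed through the configured URL filters, and each
  // surviving URL is emitted as a <url, CrawlDatum> pair with status
  // STATUS_INJECTED, the custom or default fetch interval and score, and
  // any remaining metadata copied onto the datum.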

  /** Combine multiple new entries for a url. */
  public static class InjectReducer implements
      Reducer<Text, CrawlDatum, Text, CrawlDatum> {
    private int interval;
    private float scoreInjected;
    private boolean overwrite = false;
    private boolean update = false;

    public void configure(JobConf job) {
      interval = job.getInt("db.fetch.interval.default", 2592000);
      scoreInjected = job.getFloat("db.score.injected", 1.0f);
      overwrite = job.getBoolean("db.injector.overwrite", false);
      update = job.getBoolean("db.injector.update", false);
      LOG.info("Injector: overwrite: " + overwrite);
      LOG.info("Injector: update: " + update);
    }

    public void close() {
    }

    private CrawlDatum old = new CrawlDatum();
    private CrawlDatum injected = new CrawlDatum();

    public void reduce(Text key, Iterator<CrawlDatum> values,
        OutputCollector<Text, CrawlDatum> output, Reporter reporter)
        throws IOException {
      boolean oldSet = false;
      boolean injectedSet = false;
      while (values.hasNext()) {
        CrawlDatum val = values.next();
        if (val.getStatus() == CrawlDatum.STATUS_INJECTED) {
          injected.set(val);
          injected.setStatus(CrawlDatum.STATUS_DB_UNFETCHED);
          injectedSet = true;
        } else {
          old.set(val);
          oldSet = true;
        }
      }

      CrawlDatum res = null;

      // Old default behaviour
      if (injectedSet && !oldSet) {
        res = injected;
      } else {
        res = old;
      }
      if (injectedSet && oldSet) {
        reporter.getCounter("injector", "urls_merged").increment(1);
      }
      /**
       * Whether to overwrite, ignore or update existing records
       *
       * @see https://issues.apache.org/jira/browse/NUTCH-1405
       */
      // Injected record already exists and update but not overwrite
      if (injectedSet && oldSet && update && !overwrite) {
        res = old;
        old.putAllMetaData(injected);
        old.setScore(injected.getScore() != scoreInjected ? injected.getScore()
            : old.getScore());
        old.setFetchInterval(injected.getFetchInterval() != interval ? injected
            .getFetchInterval() : old.getFetchInterval());
      }

      // Injected record already exists and overwrite
      if (injectedSet && oldSet && overwrite) {
        res = injected;
      }

      output.collect(key, res);
    }
  }
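
  // Summary of InjectReducer: when a URL is both newly injected and already
  // present in the crawldb, the existing record wins by default. With
  // db.injector.update=true the old record is kept but the injected metadata
  // is merged in, along with any non-default injected score or fetch
  // interval; with db.injector.overwrite=true the injected record replaces
  // the old one (see NUTCH-1405).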

  public Injector() {
  }

  public Injector(Configuration conf) {
    setConf(conf);
  }

  public void inject(Path crawlDb, Path urlDir) throws IOException {
    SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
    long start = System.currentTimeMillis();
    if (LOG.isInfoEnabled()) {
      LOG.info("Injector: starting at " + sdf.format(start));
      LOG.info("Injector: crawlDb: " + crawlDb);
      LOG.info("Injector: urlDir: " + urlDir);
    }

    Path tempDir = new Path(getConf().get("mapred.temp.dir", ".")
        + "/inject-temp-"
        + Integer.toString(new Random().nextInt(Integer.MAX_VALUE)));

    // map text input file to a <url,CrawlDatum> file
    if (LOG.isInfoEnabled()) {
      LOG.info("Injector: Converting injected urls to crawl db entries.");
    }

    FileSystem fs = FileSystem.get(getConf());
    // determine if the crawldb already exists
    boolean dbExists = fs.exists(crawlDb);

    JobConf sortJob = new NutchJob(getConf());
    sortJob.setJobName("inject " + urlDir);
    FileInputFormat.addInputPath(sortJob, urlDir);
    sortJob.setMapperClass(InjectMapper.class);

    FileOutputFormat.setOutputPath(sortJob, tempDir);
    if (dbExists) {
      // Don't merge the injected urls here, wait for the merge with the
      // existing DB
      sortJob.setOutputFormat(SequenceFileOutputFormat.class);
      sortJob.setNumReduceTasks(0);
    } else {
      sortJob.setOutputFormat(MapFileOutputFormat.class);
      sortJob.setReducerClass(InjectReducer.class);
      sortJob.setBoolean("mapreduce.fileoutputcommitter.marksuccessfuljobs",
          false);
    }
    sortJob.setOutputKeyClass(Text.class);
    sortJob.setOutputValueClass(CrawlDatum.class);
    sortJob.setLong("injector.current.time", System.currentTimeMillis());

    RunningJob mapJob = null;
    try {
      mapJob = JobClient.runJob(sortJob);
    } catch (IOException e) {
      fs.delete(tempDir, true);
      throw e;
    }
    long urlsInjected = mapJob.getCounters()
        .findCounter("injector", "urls_injected").getValue();
    long urlsFiltered = mapJob.getCounters()
        .findCounter("injector", "urls_filtered").getValue();
    LOG.info("Injector: Total number of urls rejected by filters: "
        + urlsFiltered);
    LOG.info("Injector: Total number of urls after normalization: "
        + urlsInjected);
    long urlsMerged = 0;
    if (dbExists) {
      // merge with existing crawl db
      if (LOG.isInfoEnabled()) {
        LOG.info("Injector: Merging injected urls into crawl db.");
      }
      JobConf mergeJob = CrawlDb.createJob(getConf(), crawlDb);
      FileInputFormat.addInputPath(mergeJob, tempDir);
      mergeJob.setReducerClass(InjectReducer.class);
      try {
        RunningJob merge = JobClient.runJob(mergeJob);
        urlsMerged = merge.getCounters().findCounter("injector", "urls_merged")
            .getValue();
        LOG.info("Injector: URLs merged: " + urlsMerged);
      } catch (IOException e) {
        fs.delete(tempDir, true);
        throw e;
      }
      CrawlDb.install(mergeJob, crawlDb);
    } else {
      CrawlDb.install(sortJob, crawlDb);
    }

    // clean up
    fs.delete(tempDir, true);
    LOG.info("Injector: Total new urls injected: "
        + (urlsInjected - urlsMerged));
    long end = System.currentTimeMillis();
    LOG.info("Injector: finished at " + sdf.format(end) + ", elapsed: "
        + TimingUtil.elapsedTime(start, end));
  }
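
  // Note on the job flow above: a first MapReduce job converts the seed
  // files into <url, CrawlDatum> entries under tempDir. If the crawldb
  // already exists, a second job (CrawlDb.createJob + InjectReducer) merges
  // tempDir with the existing db and CrawlDb.install() moves the merged
  // output into place; otherwise the first job's MapFile output is installed
  // directly as the new crawldb. tempDir is deleted either way.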

  public static void main(String[] args) throws Exception {
    int res = ToolRunner.run(NutchConfiguration.create(), new Injector(), args);
    System.exit(res);
  }

  public int run(String[] args) throws Exception {
    if (args.length < 2) {
      System.err.println("Usage: Injector <crawldb> <url_dir>");
      return -1;
    }
    try {
      inject(new Path(args[0]), new Path(args[1]));
      return 0;
    } catch (Exception e) {
      LOG.error("Injector: " + StringUtils.stringifyException(e));
      return -1;
    }
  }

}
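
To close, a minimal sketch of driving the injector programmatically, using only the constructor and inject() method shown above. The class name InjectDemo and the paths crawl/crawldb and urls are hypothetical examples:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.nutch.crawl.Injector;
import org.apache.nutch.util.NutchConfiguration;

public class InjectDemo {
  public static void main(String[] args) throws Exception {
    // Load the standard Nutch configuration (nutch-default.xml / nutch-site.xml)
    Configuration conf = NutchConfiguration.create();
    Injector injector = new Injector(conf);
    // Inject seeds from the "urls" directory into the "crawl/crawldb" database
    injector.inject(new Path("crawl/crawldb"), new Path("urls"));
  }
}

This is equivalent to running bin/nutch inject crawl/crawldb urls. Whether re-injected URLs are ignored, updated or overwritten is governed by the db.injector.update and db.injector.overwrite properties read in InjectReducer.configure().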
