The code here targets Lucene 4.7.0, which differs slightly from Lucene 3.x (for example, Analyzer subclasses now override createComponents instead of tokenStream, and scorePayload receives the payload as a BytesRef rather than a byte[] with offset and length).

Given the three pieces of text below, I want to boost searches for the watercraft series of terms (boat, ship, vessel):

  bike car jeep truck bus boat

  train car ship boat van subway

  car plane taxi boat vessel railway

  • Define a custom MyAnalyzer that assigns payload values to the tokens of the field

package com.pera.lucene.score.payload;

import java.io.Reader;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.payloads.PayloadEncoder;
import org.apache.lucene.util.Version;

public class MyAnalyzer extends Analyzer
{
    private PayloadEncoder encoder;

    MyAnalyzer(PayloadEncoder encoder)
    {
        this.encoder = encoder;
    }

    @Override
    protected TokenStreamComponents createComponents(String fieldName, Reader reader)
    {
        // Split the whitespace-separated terms
        Tokenizer source = new WhitespaceTokenizer(Version.LUCENE_47, reader);
        // Custom filter that attaches a payload value to each matching token
        MyTokenFilter filter = new MyTokenFilter(source, encoder);
        return new TokenStreamComponents(source, filter);
    }
}
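To see what MyAnalyzer actually emits, a small throwaway check like the following can print each token together with its decoded payload. This is a sketch of my own; the class name TokenDebug and the sample text are not from the original post:

package com.pera.lucene.score.payload;

import java.io.StringReader;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.payloads.FloatEncoder;
import org.apache.lucene.analysis.payloads.PayloadHelper;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.util.BytesRef;

// Hypothetical helper, not part of the original post: prints each token and its payload.
public class TokenDebug
{
    public static void main(String[] args) throws Exception
    {
        Analyzer analyzer = new MyAnalyzer(new FloatEncoder());
        TokenStream ts = analyzer.tokenStream("tools", new StringReader("car plane taxi boat vessel railway"));
        CharTermAttribute termAtt = ts.getAttribute(CharTermAttribute.class);
        PayloadAttribute payAtt = ts.getAttribute(PayloadAttribute.class);
        ts.reset();
        while (ts.incrementToken())
        {
            BytesRef payload = payAtt.getPayload();
            System.out.println(termAtt.toString() + " -> "
                    + (payload == null ? "no payload" : PayloadHelper.decodeFloat(payload.bytes, payload.offset)));
        }
        ts.end();
        ts.close();
    }
}

With the scoreMap shown below, this should print something like boat -> 5.0 and vessel -> 100.0, while the remaining terms carry no payload.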
  • Define a custom TokenFilter that looks up each term and, when a boost is configured for it, encodes that value and attaches it as the token's payload

package com.pera.lucene.score.payload;

import java.io.IOException;

import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.payloads.PayloadEncoder;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;

public class MyTokenFilter extends TokenFilter
{
    private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
    private final PayloadAttribute payAtt = addAttribute(PayloadAttribute.class);
    private final PayloadEncoder encoder;

    public MyTokenFilter(TokenStream input, PayloadEncoder encoder)
    {
        super(input);
        this.encoder = encoder;
    }

    @Override
    public boolean incrementToken() throws IOException
    {
        if (input.incrementToken())
        {
            String term = termAtt.toString();
            if (App.scoreMap.containsKey(term))
            {
                // Encode the configured value (e.g. "100f") as this token's payload
                payAtt.setPayload(encoder.encode(App.scoreMap.get(term).toCharArray()));
            }
            else
            {
                // No boost configured for this term, so no payload is written
                payAtt.setPayload(null);
            }
            return true;
        }
        return false;
    }
}
The scoreMap referenced above is a static lookup table (term to payload value) defined in the helper class App:

public static ImmutableMap<String, String> scoreMap = ImmutableMap.of("boat", "5f", "ship", "20f", "vessel", "100f");
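For completeness, here is a minimal sketch of the App helper class, since the original post only shows the scoreMap line above; the indexPath value and the main method are assumptions (any writable directory will do):

package com.pera.lucene.score.payload;

import com.google.common.collect.ImmutableMap;

// Minimal sketch of the helper class referenced by MyTokenFilter, Indexing and Searching.
// Only scoreMap appears in the original post; indexPath and main() are assumptions.
public class App
{
    // Where the index is written and read (placeholder path)
    public static String indexPath = "payload_index";

    // term -> payload value as a string consumed by FloatEncoder ("5f" ends up as the float 5.0)
    public static ImmutableMap<String, String> scoreMap =
            ImmutableMap.of("boat", "5f", "ship", "20f", "vessel", "100f");

    public static void main(String[] args) throws Exception
    {
        new Indexing().indexPayload();
        new Searching().searchPayload();
    }
}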
  • Define a PayloadSimilarity that extends DefaultSimilarity and overrides scorePayload, so that at search time the payload value written during indexing is fed back into the score

package com.pera.lucene.score.payload;

import org.apache.lucene.analysis.payloads.PayloadHelper;
import org.apache.lucene.search.similarities.DefaultSimilarity;
import org.apache.lucene.util.BytesRef;

public class PayloadSimilarity extends DefaultSimilarity
{
    @Override
    public float scorePayload(int doc, int start, int end, BytesRef payload)
    {
        // Decode the 4-byte float written by FloatEncoder (respecting the BytesRef offset)
        return PayloadHelper.decodeFloat(payload.bytes, payload.offset);
    }
}
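Note that MyTokenFilter leaves some tokens without a payload. If scorePayload is ever handed a missing payload in your setup, a slightly more defensive drop-in variant of the method above could fall back to DefaultSimilarity's neutral value of 1.0 (a sketch, not from the original post):

@Override
public float scorePayload(int doc, int start, int end, BytesRef payload)
{
    // Fall back to the neutral score 1.0f when a token carries no payload (assumption: this case can occur)
    if (payload == null)
    {
        return 1.0f;
    }
    return PayloadHelper.decodeFloat(payload.bytes, payload.offset);
}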
  • Build the index; the custom Analyzer and the PayloadSimilarity must be set on the IndexWriterConfig

package com.pera.lucene.score.payload;

import java.io.File;
import java.io.IOException;
import java.util.Date;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.payloads.FloatEncoder;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;

public class Indexing
{
    public void indexPayload() throws IOException
    {
        Directory dir = FSDirectory.open(new File(App.indexPath));
        // The analyzer encodes payloads as floats; the similarity decodes them at search time
        Analyzer analyzer = new MyAnalyzer(new FloatEncoder());
        Similarity similarity = new PayloadSimilarity();

        IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_47, analyzer);
        iwc.setOpenMode(OpenMode.CREATE).setSimilarity(similarity);

        Date start = new Date();
        System.out.println("Indexing to directory '" + App.indexPath + "'...");

        IndexWriter writer = new IndexWriter(dir, iwc);
        Document doc = new Document();
        doc.add(new TextField("tools", "bike car jeep truck bus boat", Store.YES));
        writer.addDocument(doc);

        doc = new Document();
        doc.add(new TextField("tools", "train car ship boat van subway", Store.YES));
        writer.addDocument(doc);

        doc = new Document();
        doc.add(new TextField("tools", "car plane taxi boat vessel railway", Store.YES));
        writer.addDocument(doc);

        writer.close();

        Date end = new Date();
        System.out.println(end.getTime() - start.getTime() + " total milliseconds");
    }
}
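To confirm that the payloads really ended up in the index, one can walk the postings of a boosted term and decode each payload. This is an optional check of my own, not part of the original post; the class name PayloadDump is hypothetical:

package com.pera.lucene.score.payload;

import java.io.File;
import java.io.IOException;

import org.apache.lucene.analysis.payloads.PayloadHelper;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.BytesRef;

// Hypothetical helper: dumps the payloads stored for the term "vessel" in the "tools" field.
public class PayloadDump
{
    public void dumpPayload() throws IOException
    {
        IndexReader reader = DirectoryReader.open(FSDirectory.open(new File(App.indexPath)));
        Terms terms = MultiFields.getTerms(reader, "tools");
        TermsEnum te = terms.iterator(null);
        if (te.seekExact(new BytesRef("vessel")))
        {
            DocsAndPositionsEnum dpe = te.docsAndPositions(MultiFields.getLiveDocs(reader), null);
            while (dpe.nextDoc() != DocIdSetIterator.NO_MORE_DOCS)
            {
                for (int i = 0; i < dpe.freq(); i++)
                {
                    dpe.nextPosition();
                    BytesRef payload = dpe.getPayload();
                    if (payload != null)
                    {
                        System.out.println("doc=" + dpe.docID() + " payload="
                                + PayloadHelper.decodeFloat(payload.bytes, payload.offset));
                    }
                }
            }
        }
        reader.close();
    }
}

For the data above it should report a 100.0 payload for vessel in the third document.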
  • Search; the PayloadSimilarity must also be set on the IndexSearcher

package com.pera.lucene.score.payload;

import java.io.File;
import java.io.IOException;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.payloads.AveragePayloadFunction;
import org.apache.lucene.search.payloads.PayloadTermQuery;
import org.apache.lucene.store.FSDirectory;

public class Searching
{
    public void searchPayload() throws IOException, ParseException
    {
        IndexReader reader = DirectoryReader.open(FSDirectory.open(new File(App.indexPath)));
        IndexSearcher searcher = new IndexSearcher(reader);

        // One PayloadTermQuery per boosted term; AveragePayloadFunction averages the
        // payloads of all matching positions of the term within a document
        BooleanQuery bq = new BooleanQuery();
        PayloadTermQuery ptq1 = new PayloadTermQuery(new Term("tools", "ship"), new AveragePayloadFunction());
        PayloadTermQuery ptq2 = new PayloadTermQuery(new Term("tools", "boat"), new AveragePayloadFunction());
        PayloadTermQuery ptq3 = new PayloadTermQuery(new Term("tools", "vessel"), new AveragePayloadFunction());
        bq.add(ptq1, Occur.SHOULD);
        bq.add(ptq2, Occur.SHOULD);
        bq.add(ptq3, Occur.SHOULD);

        // Set the custom PayloadSimilarity so the payload values influence the score
        searcher.setSimilarity(new PayloadSimilarity());

        TopDocs results = searcher.search(bq, 10);
        ScoreDoc[] hits = results.scoreDocs;
        int numTotalHits = results.totalHits;
        System.out.println(numTotalHits + " total matching documents");

        for (int i = 0; i < hits.length; i++)
        {
            int docId = hits[i].doc; // document number
            float lucene_score = hits[i].score;
            String tools = searcher.doc(docId).get("tools");
            System.out.println("DocId:" + docId + "\tLucene Score:" + lucene_score + "\tTools:" + tools);
            // Dump the full scoring explanation for this hit
            Explanation explanation = searcher.explain(bq, docId);
            System.out.println(explanation.toString());
        }
        reader.close();
    }
}
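As a side note (not in the original post), PayloadTermQuery also has a three-argument constructor whose includeSpanScore flag drops the span's tf/idf contribution so that the payload alone drives the term's score, and MaxPayloadFunction can be used instead of the average. A sketch of how ptq3 could be built that way:

import org.apache.lucene.search.payloads.MaxPayloadFunction;

// Hypothetical variant: score "vessel" purely by its (maximum) payload value,
// ignoring the usual tf/idf span score for that term.
PayloadTermQuery ptq3 = new PayloadTermQuery(
        new Term("tools", "vessel"), new MaxPayloadFunction(), false);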
  • Search results: with the payloads in place, Doc 2 is promoted to the top. Its btq contribution for vessel is the term weight 0.248 multiplied by the payload score 100.0, about 24.81; adding boat's 0.319 and applying the coord factor 2/3 gives (0.319 + 24.81) × 2/3 ≈ 16.75, well ahead of Doc 1 (≈ 3.52) and Doc 0 (≈ 0.106).
3 total matching documents
DocId:2 Lucene Score:16.750757 Tools:car plane taxi boat vessel railway
16.750757 = (MATCH) product of:
25.126135 = (MATCH) sum of:
0.3186112 = (MATCH) btq, product of:
0.06372224 = weight(tools:boat in 2) [PayloadSimilarity], result of:
0.06372224 = score(doc=2,freq=0.5 = phraseFreq=0.5
), product of:
0.33736566 = queryWeight, product of:
0.71231794 = idf(docFreq=3, maxDocs=3)
0.4736167 = queryNorm
0.18888181 = fieldWeight in 2, product of:
0.70710677 = tf(freq=0.5), with freq of:
0.5 = phraseFreq=0.5
0.71231794 = idf(docFreq=3, maxDocs=3)
0.375 = fieldNorm(doc=2)
5.0 = AveragePayloadFunction.docScore()
24.807524 = (MATCH) btq, product of:
0.24807523 = weight(tools:vessel in 2) [PayloadSimilarity], result of:
0.24807523 = score(doc=2,freq=0.5 = phraseFreq=0.5
), product of:
0.66565174 = queryWeight, product of:
1.4054651 = idf(docFreq=1, maxDocs=3)
0.4736167 = queryNorm
0.37268022 = fieldWeight in 2, product of:
0.70710677 = tf(freq=0.5), with freq of:
0.5 = phraseFreq=0.5
1.4054651 = idf(docFreq=1, maxDocs=3)
0.375 = fieldNorm(doc=2)
100.0 = AveragePayloadFunction.docScore()
0.6666667 = coord(2/3)
DocId:1 Lucene Score:3.5200772 Tools:train car ship boat van subway
3.5200772 = (MATCH) product of:
5.2801156 = (MATCH) sum of:
4.9615045 = (MATCH) btq, product of:
0.24807523 = weight(tools:ship in 1) [PayloadSimilarity], result of:
0.24807523 = score(doc=1,freq=0.5 = phraseFreq=0.5
), product of:
0.66565174 = queryWeight, product of:
1.4054651 = idf(docFreq=1, maxDocs=3)
0.4736167 = queryNorm
0.37268022 = fieldWeight in 1, product of:
0.70710677 = tf(freq=0.5), with freq of:
0.5 = phraseFreq=0.5
1.4054651 = idf(docFreq=1, maxDocs=3)
0.375 = fieldNorm(doc=1)
20.0 = AveragePayloadFunction.docScore()
0.3186112 = (MATCH) btq, product of:
0.06372224 = weight(tools:boat in 1) [PayloadSimilarity], result of:
0.06372224 = score(doc=1,freq=0.5 = phraseFreq=0.5
), product of:
0.33736566 = queryWeight, product of:
0.71231794 = idf(docFreq=3, maxDocs=3)
0.4736167 = queryNorm
0.18888181 = fieldWeight in 1, product of:
0.70710677 = tf(freq=0.5), with freq of:
0.5 = phraseFreq=0.5
0.71231794 = idf(docFreq=3, maxDocs=3)
0.375 = fieldNorm(doc=1)
5.0 = AveragePayloadFunction.docScore()
0.6666667 = coord(2/3)
DocId:0 Lucene Score:0.106203735 Tools:bike car jeep truck bus boat
0.106203735 = (MATCH) product of:
0.3186112 = (MATCH) sum of:
0.3186112 = (MATCH) btq, product of:
0.06372224 = weight(tools:boat in 0) [PayloadSimilarity], result of:
0.06372224 = score(doc=0,freq=0.5 = phraseFreq=0.5
), product of:
0.33736566 = queryWeight, product of:
0.71231794 = idf(docFreq=3, maxDocs=3)
0.4736167 = queryNorm
0.18888181 = fieldWeight in 0, product of:
0.70710677 = tf(freq=0.5), with freq of:
0.5 = phraseFreq=0.5
0.71231794 = idf(docFreq=3, maxDocs=3)
0.375 = fieldNorm(doc=0)
5.0 = AveragePayloadFunction.docScore()
0.33333334 = coord(1/3)
