lucene.net helper类 【结合盘古分词进行搜索的小例子(分页功能)】
转自:http://blog.csdn.net/pukuimin1226/article/details/17558247
添加:2013-12-25
更新:2013-12-26 新增分页功能。
更新:2013-12-27 新增按分类查询功能,调整索引行新增记录的图片字段。
最新盘古分词dll和词典管理工具下载:http://pangusegment.codeplex.com/
词典下载:http://pangusegment.codeplex.com/releases/view/47411
Lucene.net下载:http://lucenenet.apache.org/download.cgi
//封装类
- using System;
- using System.Collections.Generic;
- using System.Linq;
- using System.Web;
- using Lucene.Net.Analysis;
- using Lucene.Net.Index;
- using Lucene.Net.Documents;
- using System.Reflection;
- using Lucene.Net.QueryParsers;
- using Lucene.Net.Search;
namespace SearchTest
{
    /// <summary>
    /// Helper for using the PanGu analyzer with Lucene.Net (index, search,
    /// highlight, delete). Access it through the singleton <see cref="instance"/>.
    /// </summary>
    public class PanGuLuceneHelper
    {
        private PanGuLuceneHelper() { }

        #region singleton
        // Created by the static initializer, which the CLR guarantees runs
        // exactly once, so this is thread-safe without explicit locking
        // (the original null-check lazy init could race under concurrency).
        private static readonly PanGuLuceneHelper _instance = new PanGuLuceneHelper();
        /// <summary>
        /// The single shared instance of this helper.
        /// </summary>
        public static PanGuLuceneHelper instance
        {
            get { return _instance; }
        }
        #endregion
- #region 分词测试
- /// <summary>
- /// 分词测试
- /// </summary>
- /// <param name="keyword"></param>
- /// <returns></returns>
- public string Token(string keyword)
- {
- string ret = "";
- System.IO.StringReader reader = new System.IO.StringReader(keyword);
- Lucene.Net.Analysis.TokenStream ts = analyzer.TokenStream(keyword, reader);
- bool hasNext = ts.IncrementToken();
- Lucene.Net.Analysis.Tokenattributes.ITermAttribute ita;
- while (hasNext)
- {
- ita = ts.GetAttribute<Lucene.Net.Analysis.Tokenattributes.ITermAttribute>();
- ret += ita.Term + "|";
- hasNext = ts.IncrementToken();
- }
- ts.CloneAttributes();
- reader.Close();
- analyzer.Close();
- return ret;
- }
- #endregion
- #region 创建索引
- /// <summary>
- /// 创建索引
- /// </summary>
- /// <param name="datalist"></param>
- /// <returns></returns>
- public bool CreateIndex(List<MySearchUnit> datalist)
- {
- IndexWriter writer = null;
- try
- {
- writer = new IndexWriter(directory_luce, analyzer, false, IndexWriter.MaxFieldLength.LIMITED);//false表示追加(true表示删除之前的重新写入)
- }
- catch
- {
- writer = new IndexWriter(directory_luce, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);//false表示追加(true表示删除之前的重新写入)
- }
- foreach (MySearchUnit data in datalist)
- {
- CreateIndex(writer, data);
- }
- writer.Optimize();
- writer.Dispose();
- return true;
- }
- public bool CreateIndex(IndexWriter writer, MySearchUnit data)
- {
- try
- {
- if (data == null) return false;
- Document doc = new Document();
- Type type = data.GetType();//assembly.GetType("Reflect_test.PurchaseOrderHeadManageModel", true, true); //命名空间名称 + 类名
- //创建类的实例
- //object obj = Activator.CreateInstance(type, true);
- //获取公共属性
- PropertyInfo[] Propertys = type.GetProperties();
- for (int i = 0; i < Propertys.Length; i++)
- {
- //Propertys[i].SetValue(Propertys[i], i, null); //设置值
- PropertyInfo pi = Propertys[i];
- string name=pi.Name;
- object objval = pi.GetValue(data, null);
- string value = objval == null ? "" : objval.ToString(); //值
- if (name == "id" || name=="flag" )//id在写入索引时必是不分词,否则是模糊搜索和删除,会出现混乱
- {
- doc.Add(new Field(name, value, Field.Store.YES, Field.Index.NOT_ANALYZED));//id不分词
- }
- else
- {
- doc.Add(new Field(name, value, Field.Store.YES, Field.Index.ANALYZED));
- }
- }
- writer.AddDocument(doc);
- }
- catch (System.IO.FileNotFoundException fnfe)
- {
- throw fnfe;
- }
- return true;
- }
- #endregion
- #region 在title和content字段中查询数据
- /// <summary>
- /// 在title和content字段中查询数据
- /// </summary>
- /// <param name="keyword"></param>
- /// <returns></returns>
- public List<MySearchUnit> Search(string keyword)
- {
- string[] fileds = { "title", "content" };//查询字段
- //Stopwatch st = new Stopwatch();
- //st.Start();
- QueryParser parser = null;// new QueryParser(Lucene.Net.Util.Version.LUCENE_30, field, analyzer);//一个字段查询
- parser = new MultiFieldQueryParser(version, fileds, analyzer);//多个字段查询
- Query query = parser.Parse(keyword);
- int n = 1000;
- IndexSearcher searcher = new IndexSearcher(directory_luce, true);//true-表示只读
- TopDocs docs = searcher.Search(query, (Filter)null, n);
- if (docs == null || docs.TotalHits == 0)
- {
- return null;
- }
- else
- {
- List<MySearchUnit> list = new List<MySearchUnit>();
- int counter = 1;
- foreach (ScoreDoc sd in docs.ScoreDocs)//遍历搜索到的结果
- {
- try
- {
- Document doc = searcher.Doc(sd.Doc);
- string id = doc.Get("id");
- string title = doc.Get("title");
- string content = doc.Get("content");
- string flag = doc.Get("flag");
- string imageurl = doc.Get("imageurl");
- string updatetime = doc.Get("updatetime");
- string createdate = doc.Get("createdate");
- PanGu.HighLight.SimpleHTMLFormatter simpleHTMLFormatter = new PanGu.HighLight.SimpleHTMLFormatter("<font color=\"red\">", "</font>");
- PanGu.HighLight.Highlighter highlighter = new PanGu.HighLight.Highlighter(simpleHTMLFormatter, new PanGu.Segment());
- highlighter.FragmentSize = 50;
- content = highlighter.GetBestFragment(keyword, content);
- string titlehighlight = highlighter.GetBestFragment(keyword, title);
- if (titlehighlight != "") title = titlehighlight;
- list.Add(new MySearchUnit(id, title, content, flag,imageurl, updatetime));
- }
- catch (Exception ex)
- {
- Console.WriteLine(ex.Message);
- }
- counter++;
- }
- return list;
- }
- //st.Stop();
- //Response.Write("查询时间:" + st.ElapsedMilliseconds + " 毫秒<br/>");
- }
- #endregion
- #region 在不同的分类下再根据title和content字段中查询数据(分页)
- /// <summary>
- /// 在不同的类型下再根据title和content字段中查询数据(分页)
- /// </summary>
- /// <param name="_flag">分类,传空值查询全部</param>
- /// <param name="keyword"></param>
- /// <param name="PageIndex"></param>
- /// <param name="PageSize"></param>
- /// <param name="TotalCount"></param>
- /// <returns></returns>
- public List<MySearchUnit> Search(string _flag,string keyword, int PageIndex, int PageSize, out int TotalCount)
- {
- if (PageIndex < 1) PageIndex = 1;
- //Stopwatch st = new Stopwatch();
- //st.Start();
- BooleanQuery bq = new BooleanQuery();
- if (_flag != "")
- {
- QueryParser qpflag = new QueryParser(version, "flag", analyzer);
- Query qflag = qpflag.Parse(_flag);
- bq.Add(qflag, Occur.MUST);//与运算
- }
- if (keyword != "")
- {
- string[] fileds = { "title", "content" };//查询字段
- QueryParser parser = null;// new QueryParser(version, field, analyzer);//一个字段查询
- parser = new MultiFieldQueryParser(version, fileds, analyzer);//多个字段查询
- Query queryKeyword = parser.Parse(keyword);
- bq.Add(queryKeyword, Occur.MUST);//与运算
- }
- TopScoreDocCollector collector = TopScoreDocCollector.Create(PageIndex * PageSize, false);
- IndexSearcher searcher = new IndexSearcher(directory_luce, true);//true-表示只读
- searcher.Search(bq, collector);
- if (collector == null || collector.TotalHits == 0)
- {
- TotalCount = 0;
- return null;
- }
- else
- {
- int start = PageSize * (PageIndex - 1);
- //结束数
- int limit = PageSize;
- ScoreDoc[] hits = collector.TopDocs(start, limit).ScoreDocs;
- List<MySearchUnit> list = new List<MySearchUnit>();
- int counter = 1;
- TotalCount = collector.TotalHits;
- foreach (ScoreDoc sd in hits)//遍历搜索到的结果
- {
- try
- {
- Document doc = searcher.Doc(sd.Doc);
- string id = doc.Get("id");
- string title = doc.Get("title");
- string content = doc.Get("content");
- string flag = doc.Get("flag");
- string imageurl = doc.Get("imageurl");
- string updatetime = doc.Get("updatetime");
- PanGu.HighLight.SimpleHTMLFormatter simpleHTMLFormatter = new PanGu.HighLight.SimpleHTMLFormatter("<font color=\"red\">", "</font>");
- PanGu.HighLight.Highlighter highlighter = new PanGu.HighLight.Highlighter(simpleHTMLFormatter, new PanGu.Segment());
- highlighter.FragmentSize = 50;
- content = highlighter.GetBestFragment(keyword, content);
- string titlehighlight = highlighter.GetBestFragment(keyword, title);
- if (titlehighlight != "") title = titlehighlight;
- list.Add(new MySearchUnit(id, title, content, flag,imageurl, updatetime));
- }
- catch (Exception ex)
- {
- Console.WriteLine(ex.Message);
- }
- counter++;
- }
- return list;
- }
- //st.Stop();
- //Response.Write("查询时间:" + st.ElapsedMilliseconds + " 毫秒<br/>");
- }
- #endregion
- #region 删除索引数据(根据id)
- /// <summary>
- /// 删除索引数据(根据id)
- /// </summary>
- /// <param name="id"></param>
- /// <returns></returns>
- public bool Delete(string id)
- {
- bool IsSuccess = false;
- Term term = new Term("id", id);
- //Analyzer analyzer = new StandardAnalyzer(Lucene.Net.Util.Version.LUCENE_30);
- //Version version = new Version();
- //MultiFieldQueryParser parser = new MultiFieldQueryParser(version, new string[] { "name", "job" }, analyzer);//多个字段查询
- //Query query = parser.Parse("小王");
- //IndexReader reader = IndexReader.Open(directory_luce, false);
- //reader.DeleteDocuments(term);
- //Response.Write("删除记录结果: " + reader.HasDeletions + "<br/>");
- //reader.Dispose();
- IndexWriter writer = new IndexWriter(directory_luce, analyzer, false, IndexWriter.MaxFieldLength.LIMITED);
- writer.DeleteDocuments(term); // writer.DeleteDocuments(term)或者writer.DeleteDocuments(query);
- ////writer.DeleteAll();
- writer.Commit();
- //writer.Optimize();//
- IsSuccess = writer.HasDeletions();
- writer.Dispose();
- return IsSuccess;
- }
- #endregion
- #region 删除全部索引数据
- /// <summary>
- /// 删除全部索引数据
- /// </summary>
- /// <returns></returns>
- public bool DeleteAll()
- {
- bool IsSuccess = true;
- try
- {
- IndexWriter writer = new IndexWriter(directory_luce, analyzer, false, IndexWriter.MaxFieldLength.LIMITED);
- writer.DeleteAll();
- writer.Commit();
- //writer.Optimize();//
- IsSuccess = writer.HasDeletions();
- writer.Dispose();
- }
- catch
- {
- IsSuccess = false;
- }
- return IsSuccess;
- }
- #endregion
- #region directory_luce
- private Lucene.Net.Store.Directory _directory_luce = null;
- /// <summary>
- /// Lucene.Net的目录-参数
- /// </summary>
- public Lucene.Net.Store.Directory directory_luce
- {
- get
- {
- if (_directory_luce == null) _directory_luce = Lucene.Net.Store.FSDirectory.Open(directory);
- return _directory_luce;
- }
- }
- #endregion
- #region directory
- private System.IO.DirectoryInfo _directory = null;
- /// <summary>
- /// 索引在硬盘上的目录
- /// </summary>
- public System.IO.DirectoryInfo directory
- {
- get
- {
- if (_directory == null)
- {
- string dirPath = AppDomain.CurrentDomain.BaseDirectory + "SearchIndex";
- if (System.IO.Directory.Exists(dirPath) == false) _directory = System.IO.Directory.CreateDirectory(dirPath);
- else _directory = new System.IO.DirectoryInfo(dirPath);
- }
- return _directory;
- }
- }
- #endregion
- #region analyzer
- private Analyzer _analyzer = null;
- /// <summary>
- /// 分析器
- /// </summary>
- public Analyzer analyzer
- {
- get
- {
- //if (_analyzer == null)
- {
- _analyzer = new Lucene.Net.Analysis.PanGu.PanGuAnalyzer();//盘古分词分析器
- //_analyzer = new StandardAnalyzer(Lucene.Net.Util.Version.LUCENE_30);//标准分析器
- }
- return _analyzer;
- }
- }
- #endregion
- #region version
- private static Lucene.Net.Util.Version _version = Lucene.Net.Util.Version.LUCENE_30;
- /// <summary>
- /// 版本号枚举类
- /// </summary>
- public Lucene.Net.Util.Version version
- {
- get
- {
- return _version;
- }
- }
- #endregion
- }
- #region 索引的一个行单元,相当于数据库中的一行数据
- /// <summary>
- /// 索引的一个行单元,相当于数据库中的一行数据
- /// </summary>
- public class MySearchUnit
- {
- public MySearchUnit(string _id, string _title, string _content, string _flag, string _imageurl, string _updatetime)
- {
- this.id = _id;
- this.title = _title;
- this.content = _content;
- this.flag = _flag;
- this.imageurl = _imageurl;
- this.updatetime = _updatetime;
- }
- /// <summary>
- /// 唯一的id号
- /// </summary>
- public string id { get; set; }
- /// <summary>
- /// 标题
- /// </summary>
- public string title { get; set; }
- /// <summary>
- /// 内容
- /// </summary>
- public string content { get; set; }
- /// <summary>
- /// 其他信息
- /// </summary>
- public string flag { get; set; }
- /// <summary>
- /// 图片路径
- /// </summary>
- public string imageurl { get; set; }
- /// <summary>
- /// 时间
- /// </summary>
- public string updatetime { get; set; }
- }
- #endregion
- }
//调用测试
/// <summary>
/// Demo page: optionally (re)builds a tiny index, then runs a paged,
/// category-filtered search and renders the hits into div_content.
/// </summary>
protected void Page_Load(object sender, EventArgs e)
{
    //PanGuLuceneHelper.instance.DeleteAll();   // wipe the whole index
    //PanGuLuceneHelper.instance.Delete("1d");  // delete one row by id
    bool exec = false; // flip to true once to (re)build the demo index
    if (exec)
    {
        // One shared generator: the original constructed `new Random()` per
        // row, which is tick-seeded and can yield identical "random" flags.
        Random rnd = new Random();
        List<MySearchUnit> list = new List<MySearchUnit>();
        list.Add(new MySearchUnit("1a", "标题小王", "今天是小王的生日,大家都很高兴去他家喝酒,玩了一整天。", rnd.Next(1, 10).ToString(), "", ""));
        list.Add(new MySearchUnit("1b", "标题小张", "今天是小张的生日,大家都很高兴去他家喝酒,玩了几天。", rnd.Next(1, 10).ToString(), "", ""));
        list.Add(new MySearchUnit("1c", "标题小王", "今天是小王的生日,大家都很高兴去他家喝酒,玩了一整天。", rnd.Next(1, 10).ToString(), "", ""));
        list.Add(new MySearchUnit("1d", "标题小张", "今天是小张的生日,大家都很高兴去他家喝酒,玩了几天。", rnd.Next(1, 10).ToString(), "", ""));
        PanGuLuceneHelper.instance.CreateIndex(list);
    }
    int count = 0;
    int pageIndex = 2;
    int pageSize = 4;
    // StringBuilder instead of repeated string concatenation in the loop.
    System.Text.StringBuilder html = new System.Text.StringBuilder();
    List<MySearchUnit> searchlist = PanGuLuceneHelper.instance.Search("3", "小王 生日", pageIndex, pageSize, out count);
    html.Append("查询结果:" + count + "条数据<br/>");
    if (searchlist == null || searchlist.Count == 0)
    {
        html.Append("未查询到数据。<br/>");
    }
    else
    {
        foreach (MySearchUnit data in searchlist)
        {
            html.AppendFormat("id:{0},title:{1},content:{2},flag:{3},updatetime:{4}<br/>", data.id, data.title, data.content, data.flag, data.updatetime);
        }
    }
    html.Append(PanGuLuceneHelper.instance.version);
    div_content.InnerHtml = html.ToString();
}
//效果:
第一版源码示例下载:http://download.csdn.net/detail/pukuimin1226/6768179
最新源码示例下载:http://download.csdn.net/detail/pukuimin1226/6776049
百度云盘下载链接:http://pan.baidu.com/s/1o69cCD8
Lucene.Net没有判断数据重复性,同一条数据插入多少遍它就有多少条相同的数据,所以,我们人为地用id区分,在数据量大,全部重新创建索引时间长的情况下(数据量到几万以上就耗资源了,从数据库中查询出来,再写入索引,使得数据库和程序本身都增加负担),增量建立索引是很有必要的。
新增一条数据,就直接添加一条索引;
修改一条数据,先删除同一个id的索引(不管有多少个id相同的,都会一次性删除),再添加一条。
数据库中的id建议大家都用guid去掉“-”,还可以加日期“yyyyMMddHHmmss”这样组合,长度一致看起来美观,也充分保证唯一。
lucene.net helper类 【结合盘古分词进行搜索的小例子(分页功能)】的更多相关文章
- lucene.net 3.0.3、结合盘古分词进行搜索的小例子(转)
lucene.net 3.0.3.结合盘古分词进行搜索的小例子(分页功能) 添加:2013-12-25 更新:2013-12-26 新增分页功能. 更新:2013-12-27 新增按分类查询功能, ...
- lucene.net 3.0.3、结合盘古分词进行搜索的小例子(分页功能)
转自:http://blog.csdn.net/pukuimin1226/article/details/17558247 添加:2013-12-25 更新:2013-12-26 新增分页功能. 更新 ...
- 使用Lucene.net+盘古分词实现搜索查询
这里我的的Demo的逻辑是这样的:首先我基本的数据是储存在Sql数据库中,然后我把我的必需的数据推送到MongoDB中,这样再去利用Lucene.net+盘古创建索引:其中为什么要这样把数据推送到Mo ...
- Lucene.Net+盘古分词->开发自己的搜索引擎
//封装类 using System;using System.Collections.Generic;using System.Linq;using System.Web;using Lucene. ...
- Lucene.net入门学习(结合盘古分词)
Lucene简介 Lucene是apache软件基金会4 jakarta项目组的一个子项目,是一个开放源代码的全文检索引擎工具包,即它不是一个完整的全文检索引擎,而是一个全文检索引擎的架构,提供了完整 ...
- Lucene.net入门学习(结合盘古分词)(转载)
作者:释迦苦僧 出处:http://www.cnblogs.com/woxpp/p/3972233.html 本文版权归作者和博客园共有,欢迎转载,但未经作者同意必须保留此段声明,且在文章页面明显 ...
- 【原创】Lucene.Net+盘古分词器(详细介绍)
本章阅读概要 1.Lucenne.Net简介 2.介绍盘古分词器 3.Lucene.Net实例分析 4.结束语(Demo下载) Lucene.Net简介 Lucene.net是Lucene的.net移 ...
- 站内搜索——Lucene +盘古分词
为了方便的学习站内搜索,下面我来演示一个MVC项目. 1.首先在项目中[添加引入]三个程序集和[Dict]文件夹,并新建一个[分词内容存放目录] Lucene.Net.dll.PanGu.dll.Pa ...
- Lucene.Net+盘古分词器(详细介绍)(转)
出处:http://www.cnblogs.com/magicchaiy/archive/2013/06/07/LuceneNet%E7%9B%98%E5%8F%A4%E5%88%86%E8%AF%8 ...
随机推荐
- 决策树模型 ID3/C4.5/CART算法比较
决策树模型在监督学习中非常常见,可用于分类(二分类.多分类)和回归.虽然将多棵弱决策树的Bagging.Random Forest.Boosting等tree ensembel 模型更为常见,但是“完 ...
- mark资料-selenium断言的分类
操作(action).辅助(accessors)和断言(assertion): 操作action: 模拟用户与 Web 应用程序的交互. 辅助accessors: 这是辅助工具.用于检查应用程序的状态 ...
- JDBC接口规范
前言 JDBC(JavaDatabase Connectivity)表示Java查询引擎连接,由一组用Java编程语言编写的类和接口组成.JDBC为Java程序访问关系型查询引擎提供了编程接口,为查询 ...
- mapreduce入门之wordcount注释详解
mapreduce版本:0.2.0之前 说明: 该注释为之前学习时找到的一篇,现在只是在入门以后对该注释做了一些修正以及添加. 由于版本问题,该代码并没有在集群环境中运行,只将其做为理解mapredu ...
- Sharded实现学习-我们到底能走多远系列(32)
我们到底能走多远系列(32) 扯淡: 工作是容易的赚钱是困难的 恋爱是容易的成家是困难的 相爱是容易的相处是困难的 决定是容易的可是等待是困难的 主题: 1,Sharded的实现 Sharded ...
- json数据的jquery操作和asp.net后台操作
jquery操作 json对象创建 var item0={"a":"val1","b":"val2"}; json对象字 ...
- (转载)Hadoop map reduce 过程获取环境变量
来源:http://www.linuxidc.com/Linux/2012-07/66337.htm 作者: lmc_wy Hadoop任务执行过程中,在每一个map节点或者reduce节点能获取 ...
- 三星Mega 6.3(i9200)删除kingroot
关于kingroot实际效果怎么样,是不是流氓软件,我不做评论. 手机型号,三星galaxy mega 6.3(i9200)水货,更详细的机型不知道怎么看. 刷了官方的rom以后,下载了kingroo ...
- Qt_Window@Qt Command Prompt从命令行创建工程
#include <QApplication> #include <QLabel> int main(int argc, char *argv[]) { QApplicatio ...
- (转) An overview of gradient descent optimization algorithms
An overview of gradient descent optimization algorithms Table of contents: Gradient descent variants ...