lucene.net全文检索(二)lucene.net 的封装
查询
public class LuceneQuery : ILuceneQuery
{
    #region Identity
    private Logger logger = new Logger(typeof(LuceneQuery));
    #endregion Identity

    #region QueryIndex
    /// <summary>
    /// Returns all commodities matching the query (capped at 10000 hits).
    /// </summary>
    /// <param name="queryString">Raw query text; parsed against the "title" field with the PanGu analyzer.</param>
    /// <returns>Matching commodities, in relevance order.</returns>
    public List<Commodity> QueryIndex(string queryString)
    {
        IndexSearcher searcher = null;
        try
        {
            List<Commodity> ciList = new List<Commodity>();
            Directory dir = FSDirectory.Open(StaticConstant.IndexPath);
            searcher = new IndexSearcher(dir);
            Analyzer analyzer = new PanGuAnalyzer(); // PanGu analyzer: Chinese word segmentation
            QueryParser parser = new QueryParser(Version.LUCENE_30, "title", analyzer);
            Query query = parser.Parse(queryString);
            Console.WriteLine(query.ToString()); // dump the parsed query expression for debugging
            TopDocs docs = searcher.Search(query, (Filter)null, 10000);
            foreach (ScoreDoc sd in docs.ScoreDocs)
            {
                Document doc = searcher.Doc(sd.Doc);
                ciList.Add(DocumentToCommodityInfo(doc));
            }
            return ciList;
        }
        finally
        {
            if (searcher != null)
            {
                searcher.Dispose();
            }
        }
    }

    /// <summary>
    /// Returns one page of commodities matching the query, with optional price filtering and ordering.
    /// </summary>
    /// <param name="queryString">Raw query text; parsed against the "title" field.</param>
    /// <param name="pageIndex">1-based page index; values below 1 are clamped to 1.</param>
    /// <param name="pageSize">Number of items per page.</param>
    /// <param name="totalCount">Total number of hits for the query (may exceed the number of retrievable docs).</param>
    /// <param name="priceFilter">Range like "[10,20]" / "{10,20}"; '[' / ']' include the bound, '{' / '}' exclude it.</param>
    /// <param name="priceOrderBy">When non-empty, sort by price; a value ending in "asc" sorts ascending, otherwise descending.</param>
    /// <returns>The requested page of commodities.</returns>
    public List<Commodity> QueryIndexPage(string queryString, int pageIndex, int pageSize, out int totalCount, string priceFilter, string priceOrderBy)
    {
        totalCount = 0;
        IndexSearcher searcher = null;
        try
        {
            List<Commodity> ciList = new List<Commodity>();
            FSDirectory dir = FSDirectory.Open(StaticConstant.IndexPath);
            searcher = new IndexSearcher(dir);
            Analyzer analyzer = new PanGuAnalyzer(); // PanGu analyzer: Chinese word segmentation
            QueryParser parser = new QueryParser(Version.LUCENE_30, "title", analyzer);
            Query query = parser.Parse(queryString);
            pageIndex = Math.Max(1, pageIndex); // page index is 1-based

            int startIndex = (pageIndex - 1) * pageSize;
            int endIndex = pageIndex * pageSize;

            NumericRangeFilter<float> numPriceFilter = null;
            if (!string.IsNullOrWhiteSpace(priceFilter))
            {
                // Bracket style decides bound inclusiveness: [a,b] inclusive, {a,b} exclusive.
                bool isContainStart = priceFilter.StartsWith("[");
                bool isContainEnd = priceFilter.EndsWith("]");
                string[] floatArray = priceFilter.Replace("[", "").Replace("]", "").Replace("{", "").Replace("}", "").Split(',');
                float start = 0;
                float end = 0;
                if (!float.TryParse(floatArray[0], out start) || !float.TryParse(floatArray[1], out end))
                {
                    throw new Exception("Wrong priceFilter");
                }
                numPriceFilter = NumericRangeFilter.NewFloatRange("price", start, end, isContainStart, isContainEnd);
            }

            Sort sort = new Sort(); // default sort: relevance
            if (!string.IsNullOrWhiteSpace(priceOrderBy))
            {
                // SortField's bool is "reverse": true when the caller asked for ascending per original convention.
                SortField sortField = new SortField("price", SortField.FLOAT, priceOrderBy.EndsWith("asc", StringComparison.CurrentCultureIgnoreCase));
                sort.SetSort(sortField);
            }

            TopDocs docs = searcher.Search(query, numPriceFilter, 10000, sort);
            totalCount = docs.TotalHits;
            //PrintScores(docs, startIndex, endIndex, searcher);

            // BUGFIX: bound by the docs actually returned (ScoreDocs, capped at 10000),
            // not TotalHits, which can be larger and caused IndexOutOfRange on deep pages.
            for (int i = startIndex; i < endIndex && i < docs.ScoreDocs.Length; i++)
            {
                Document doc = searcher.Doc(docs.ScoreDocs[i].Doc);
                ciList.Add(DocumentToCommodityInfo(doc));
            }
            return ciList;
        }
        finally
        {
            if (searcher != null)
            {
                searcher.Dispose();
            }
        }
    }

    /// <summary>
    /// Debug helper: logs the relevance score of each hit in [startIndex, endIndex).
    /// Takes the Searcher base class so both IndexSearcher and MultiSearcher callers work.
    /// </summary>
    private void PrintScores(TopDocs docs, int startIndex, int endIndex, Searcher searcher)
    {
        ScoreDoc[] scoreDocs = docs.ScoreDocs;
        for (int i = startIndex; i < endIndex && i < scoreDocs.Length; i++)
        {
            int docId = scoreDocs[i].Doc;
            Document doc = searcher.Doc(docId);
            logger.Info(string.Format("{0}的分值为{1}", doc.Get("productid"), scoreDocs[i].Score));
        }
    }
    #endregion QueryIndex

    #region private
    /// <summary>
    /// Maps a stored Lucene document back to a Commodity entity.
    /// Field names must match those written by the index builder's ParseCItoDoc.
    /// </summary>
    private Commodity DocumentToCommodityInfo(Document doc)
    {
        return new Commodity()
        {
            Id = int.Parse(doc.Get("id")),
            Title = doc.Get("title"),
            ProductId = long.Parse(doc.Get("productid")),
            CategoryId = int.Parse(doc.Get("categoryid")),
            ImageUrl = doc.Get("imageurl"), // BUGFIX: was "iamgeurl" — field is stored as "imageurl", so ImageUrl was always null
            Price = decimal.Parse(doc.Get("price")), // NOTE(review): uses current culture; confirm index always written with '.' decimal separator
            Url = doc.Get("url")
        };
    }
    #endregion private
}
批量/单个索引的增删改
/// <summary>
/// Index maintenance: bulk build, directory merge, and single/batch insert/update/delete.
/// Concurrency approach (original author's note): multiple writers write to separate
/// sub-directories which are then merged; latency is absorbed by an async queue.
/// </summary>
public class LuceneBulid : ILuceneBulid
{
    #region Identity
    private Logger logger = new Logger(typeof(LuceneBulid));
    #endregion Identity

    #region Bulk BuildIndex / index merge
    /// <summary>
    /// Bulk-creates index entries (all items must share one sourceflag, i.e. the same directory).
    /// </summary>
    /// <param name="ciList">Commodities sharing one sourceflag.</param>
    /// <param name="pathSuffix">Suffix appended to the root index path (e.g. sa\1); empty means the root itself.</param>
    /// <param name="isCreate">false (default) = incremental index; true = delete the existing index first.</param>
    public void BuildIndex(List<Commodity> ciList, string pathSuffix = "", bool isCreate = false)
    {
        IndexWriter writer = null;
        try
        {
            if (ciList == null || ciList.Count == 0)
            {
                return;
            }
            string rootIndexPath = StaticConstant.IndexPath;
            string indexPath = string.IsNullOrWhiteSpace(pathSuffix) ? rootIndexPath : string.Format("{0}\\{1}", rootIndexPath, pathSuffix);
            DirectoryInfo dirInfo = Directory.CreateDirectory(indexPath);
            LuceneIO.Directory directory = LuceneIO.FSDirectory.Open(dirInfo);
            writer = new IndexWriter(directory, new PanGuAnalyzer(), isCreate, IndexWriter.MaxFieldLength.LIMITED);
            writer.SetMaxBufferedDocs(100); // docs held in memory before a new segment is flushed (default 10)
            writer.MergeFactor = 100;       // segment-merge frequency (default 10)
            writer.UseCompoundFile = true;  // compound file reduces the number of index files
            ciList.ForEach(c => CreateCIIndex(writer, c));
        }
        finally
        {
            if (writer != null)
            {
                // No Optimize() here on purpose — merging is handled by MergeIndex.
                writer.Close();
            }
        }
    }

    /// <summary>
    /// Merges the indexes in the given child directories up into the root index directory.
    /// The root index is recreated (existing content is deleted) before the merge.
    /// </summary>
    /// <param name="childDirs">Child folder names under the root index path.</param>
    public void MergeIndex(string[] childDirs)
    {
        Console.WriteLine("MergeIndex Start");
        IndexWriter writer = null;
        try
        {
            if (childDirs == null || childDirs.Length == 0) return;
            Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_30);
            string rootPath = StaticConstant.IndexPath;
            DirectoryInfo dirInfo = Directory.CreateDirectory(rootPath);
            LuceneIO.Directory directory = LuceneIO.FSDirectory.Open(dirInfo);
            writer = new IndexWriter(directory, analyzer, true, IndexWriter.MaxFieldLength.LIMITED); // true: recreate root index
            LuceneIO.Directory[] dirNo = childDirs.Select(dir => LuceneIO.FSDirectory.Open(Directory.CreateDirectory(string.Format("{0}\\{1}", rootPath, dir)))).ToArray();
            writer.MergeFactor = 100;      // segment-merge frequency (default 10)
            writer.UseCompoundFile = true; // compound file reduces the number of index files
            writer.AddIndexesNoOptimize(dirNo);
        }
        finally
        {
            if (writer != null)
            {
                writer.Optimize();
                writer.Close();
            }
            Console.WriteLine("MergeIndex End");
        }
    }

    // Field.Store.YES: store the field value (the value before analysis)
    // Field.Store.NO: do not store; storage is independent of indexing
    // Field.Store.COMPRESS: compressed storage for long text / binary, at a performance cost
    // Field.Index.ANALYZED: analyze (tokenize) and index
    // Field.Index.ANALYZED_NO_NORMS: analyze and index, but store norms in a single byte to save space
    // Field.Index.NOT_ANALYZED: index without analyzing
    // Field.Index.NOT_ANALYZED_NO_NORMS: index without analyzing, norms kept in a single byte
    // TermVector records the terms of a document/field and their occurrence counts
    // Field.TermVector.YES: store the term vector for this field per document
    // Field.TermVector.NO: do not store the term vector
    // Field.TermVector.WITH_POSITIONS: store positions
    // Field.TermVector.WITH_OFFSETS: store offsets
    // Field.TermVector.WITH_POSITIONS_OFFSETS: store positions and offsets
    #endregion Bulk BuildIndex / index merge

    #region Single/batch insert, update, delete
    /// <summary>
    /// Adds the index entry for a single commodity.
    /// </summary>
    /// <param name="ci">Commodity to index; null is ignored.</param>
    public void InsertIndex(Commodity ci)
    {
        IndexWriter writer = null;
        try
        {
            if (ci == null) return;
            string rootIndexPath = StaticConstant.IndexPath;
            DirectoryInfo dirInfo = Directory.CreateDirectory(rootIndexPath);
            bool isCreate = dirInfo.GetFiles().Length == 0; // no files yet => brand-new index
            LuceneIO.Directory directory = LuceneIO.FSDirectory.Open(dirInfo);
            writer = new IndexWriter(directory, CreateAnalyzerWrapper(), isCreate, IndexWriter.MaxFieldLength.LIMITED);
            writer.MergeFactor = 100;      // segment-merge frequency (default 10)
            writer.UseCompoundFile = true; // compound file reduces the number of index files
            CreateCIIndex(writer, ci);
        }
        catch (Exception ex)
        {
            logger.Error("InsertIndex异常", ex);
            throw; // BUGFIX: was "throw ex;", which resets the stack trace
        }
        finally
        {
            if (writer != null)
            {
                writer.Close();
            }
        }
    }

    /// <summary>
    /// Adds index entries for a batch of commodities (incremental build at the root).
    /// </summary>
    /// <param name="ciList">Commodities to index.</param>
    public void InsertIndexMuti(List<Commodity> ciList)
    {
        BuildIndex(ciList, "", false);
    }

    /// <summary>
    /// Deletes the index entries for a batch of commodities, matched by productid.
    /// </summary>
    /// <param name="ciList">Commodities whose entries are removed; null/empty is ignored.</param>
    public void DeleteIndexMuti(List<Commodity> ciList)
    {
        IndexReader reader = null;
        try
        {
            if (ciList == null || ciList.Count == 0) return;
            string rootIndexPath = StaticConstant.IndexPath;
            DirectoryInfo dirInfo = Directory.CreateDirectory(rootIndexPath);
            LuceneIO.Directory directory = LuceneIO.FSDirectory.Open(dirInfo);
            reader = IndexReader.Open(directory, false); // false: open writable so deletes are allowed
            foreach (Commodity ci in ciList)
            {
                reader.DeleteDocuments(new Term("productid", ci.ProductId.ToString()));
            }
        }
        catch (Exception ex)
        {
            logger.Error("DeleteIndex异常", ex);
            throw; // BUGFIX: was "throw ex;", which resets the stack trace
        }
        finally
        {
            if (reader != null)
            {
                reader.Dispose();
            }
        }
    }

    /// <summary>
    /// Deletes the index entry for a single commodity, matched by productid.
    /// </summary>
    /// <param name="ci">Commodity whose entry is removed; null is ignored.</param>
    public void DeleteIndex(Commodity ci)
    {
        IndexReader reader = null;
        try
        {
            if (ci == null) return;
            string rootIndexPath = StaticConstant.IndexPath;
            DirectoryInfo dirInfo = Directory.CreateDirectory(rootIndexPath);
            LuceneIO.Directory directory = LuceneIO.FSDirectory.Open(dirInfo);
            reader = IndexReader.Open(directory, false); // false: open writable so deletes are allowed
            reader.DeleteDocuments(new Term("productid", ci.ProductId.ToString()));
        }
        catch (Exception ex)
        {
            logger.Error("DeleteIndex异常", ex);
            throw; // BUGFIX: was "throw ex;", which resets the stack trace
        }
        finally
        {
            if (reader != null)
            {
                reader.Dispose();
            }
        }
    }

    /// <summary>
    /// Updates (replaces) the index entry for a single commodity, matched by productid.
    /// </summary>
    /// <param name="ci">Commodity to re-index; null is ignored.</param>
    public void UpdateIndex(Commodity ci)
    {
        IndexWriter writer = null;
        try
        {
            if (ci == null) return;
            string rootIndexPath = StaticConstant.IndexPath;
            DirectoryInfo dirInfo = Directory.CreateDirectory(rootIndexPath);
            bool isCreate = dirInfo.GetFiles().Length == 0; // no files yet => brand-new index
            LuceneIO.Directory directory = LuceneIO.FSDirectory.Open(dirInfo);
            writer = new IndexWriter(directory, CreateAnalyzerWrapper(), isCreate, IndexWriter.MaxFieldLength.LIMITED);
            writer.MergeFactor = 100;      // segment-merge frequency (default 10)
            writer.UseCompoundFile = true; // compound file reduces the number of index files
            writer.UpdateDocument(new Term("productid", ci.ProductId.ToString()), ParseCItoDoc(ci));
        }
        catch (Exception ex)
        {
            logger.Error("UpdateIndex异常", ex); // BUGFIX: was logged as "InsertIndex异常" (copy-paste)
            throw; // BUGFIX: was "throw ex;", which resets the stack trace
        }
        finally
        {
            if (writer != null)
            {
                writer.Close();
            }
        }
    }

    /// <summary>
    /// Updates (replaces) the index entries for a batch of commodities, matched by productid.
    /// </summary>
    /// <param name="ciList">Commodities to re-index (sharing one sourceflag); null/empty is ignored.</param>
    public void UpdateIndexMuti(List<Commodity> ciList)
    {
        IndexWriter writer = null;
        try
        {
            if (ciList == null || ciList.Count == 0) return;
            string rootIndexPath = StaticConstant.IndexPath;
            DirectoryInfo dirInfo = Directory.CreateDirectory(rootIndexPath);
            bool isCreate = dirInfo.GetFiles().Length == 0; // no files yet => brand-new index
            LuceneIO.Directory directory = LuceneIO.FSDirectory.Open(dirInfo);
            writer = new IndexWriter(directory, CreateAnalyzerWrapper(), isCreate, IndexWriter.MaxFieldLength.LIMITED);
            writer.MergeFactor = 50;       // segment-merge frequency (default 10)
            writer.UseCompoundFile = true; // compound file reduces the number of index files
            foreach (Commodity ci in ciList)
            {
                writer.UpdateDocument(new Term("productid", ci.ProductId.ToString()), ParseCItoDoc(ci));
            }
        }
        catch (Exception ex)
        {
            logger.Error("UpdateIndexMuti异常", ex); // BUGFIX: was logged as "InsertIndex异常" (copy-paste)
            throw; // BUGFIX: was "throw ex;", which resets the stack trace
        }
        finally
        {
            if (writer != null)
            {
                writer.Close();
            }
        }
    }
    #endregion Single/batch insert, update, delete

    #region PrivateMethod
    /// <summary>
    /// Builds the per-field analyzer: PanGu for "title", Standard for "categoryid",
    /// Standard as the fallback for everything else.
    /// </summary>
    private PerFieldAnalyzerWrapper CreateAnalyzerWrapper()
    {
        Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_30);
        PerFieldAnalyzerWrapper analyzerWrapper = new PerFieldAnalyzerWrapper(analyzer);
        analyzerWrapper.AddAnalyzer("title", new PanGuAnalyzer());
        analyzerWrapper.AddAnalyzer("categoryid", new StandardAnalyzer(Version.LUCENE_30));
        return analyzerWrapper;
    }

    /// <summary>
    /// Adds one commodity document to the writer; logs and rethrows on failure.
    /// </summary>
    /// <param name="writer">Open writer to add the document to.</param>
    /// <param name="ci">Commodity to convert and add.</param>
    private void CreateCIIndex(IndexWriter writer, Commodity ci)
    {
        try
        {
            writer.AddDocument(ParseCItoDoc(ci));
        }
        catch (Exception ex)
        {
            logger.Error("CreateCIIndex异常", ex);
            throw; // BUGFIX: was "throw ex;", which resets the stack trace
        }
    }

    /// <summary>
    /// Converts a Commodity into a Lucene Document. Only "title" is analyzed (PanGu);
    /// "price" is a numeric field so it supports range filters and sorting.
    /// </summary>
    /// <param name="ci">Source commodity.</param>
    /// <returns>The document ready for indexing.</returns>
    private Document ParseCItoDoc(Commodity ci)
    {
        Document doc = new Document();
        doc.Add(new Field("id", ci.Id.ToString(), Field.Store.YES, Field.Index.NOT_ANALYZED));
        doc.Add(new Field("title", ci.Title, Field.Store.YES, Field.Index.ANALYZED)); // PanGu-analyzed
        doc.Add(new Field("productid", ci.ProductId.ToString(), Field.Store.YES, Field.Index.NOT_ANALYZED));
        doc.Add(new Field("categoryid", ci.CategoryId.ToString(), Field.Store.YES, Field.Index.NOT_ANALYZED));
        doc.Add(new Field("imageurl", ci.ImageUrl, Field.Store.YES, Field.Index.NOT_ANALYZED));
        doc.Add(new Field("url", ci.Url, Field.Store.YES, Field.Index.NOT_ANALYZED));
        doc.Add(new NumericField("price", Field.Store.YES, true).SetFloatValue((float)ci.Price));
        return doc;
    }
    #endregion PrivateMethod
}
分词器封装
public class LuceneAnalyze : ILuceneAnalyze
{
private Logger logger = new Logger(typeof(LuceneAnalyze)); //
#region AnalyzerKey
/// <summary>
/// Tokenizes the search keyword into individual terms.
/// Callers join the terms with OR to fetch more results (greedy search).
/// </summary>
/// <param name="keyword">Raw user keyword; cleaned via CleanKeyword before parsing.</param>
/// <returns>The analyzed terms; falls back to the raw keyword when the parsed query type is not Term/Phrase/Boolean.</returns>
public string[] AnalyzerKey(string keyword)
{
Analyzer analyzer = new PanGuAnalyzer();
QueryParser parser = new QueryParser(Version.LUCENE_30, "title", analyzer);
Query query = parser.Parse(this.CleanKeyword(keyword));
if (query is TermQuery)
{
// Single analyzed token.
Term term = ((TermQuery)query).Term;
return new string[] { term.Text };
}
else if (query is PhraseQuery)
{
// A phrase: return each term of the phrase separately.
Term[] term = ((PhraseQuery)query).GetTerms();
return term.Select(t => t.Text).ToArray();
}
else if (query is BooleanQuery)// AND / OR combination of clauses
{
BooleanClause[] clauses = ((BooleanQuery)query).GetClauses();
List<string> analyzerWords = new List<string>();
foreach (BooleanClause clause in clauses)
{
Query childQuery = clause.Query;
if (childQuery is TermQuery)
{
Term term = ((TermQuery)childQuery).Term;
analyzerWords.Add(term.Text);
}
else if (childQuery is PhraseQuery)
{
Term[] term = ((PhraseQuery)childQuery).GetTerms();
analyzerWords.AddRange(term.Select(t => t.Text));
}
// NOTE(review): nested BooleanQuery clauses are silently skipped — confirm that is intended.
}
return analyzerWords.ToArray();
}
else
{
// Unrecognized query shape: fall back to the raw keyword as a single term.
logger.Debug(string.Format("AnalyzerKey在解析keyword={0}的结果为new string[] { keyword } ", keyword));
return new string[] { keyword };
}
}

/// <summary>
/// Neutralizes the Lucene boolean operators AND / OR at the head, tail, and interior of the
/// keyword by lower-casing them (QueryParser only treats the upper-case forms as operators),
/// then escapes the remaining special characters.
/// </summary>
/// <param name="keyword">Raw user keyword; may be null or whitespace.</param>
/// <returns>The escaped keyword safe to hand to QueryParser.Parse.</returns>
private string CleanKeyword(string keyword)
{
if (string.IsNullOrWhiteSpace(keyword))
{ } // NOTE(review): empty branch — a null keyword falls through to QueryParser.Escape(null); confirm callers never pass null
else
{
bool isClean = false;
while (!isClean)
{
keyword = keyword.Trim();
if (keyword.EndsWith(" AND"))
{
// Lower-case a trailing AND so it is no longer an operator.
keyword = string.Format("{0}and", keyword.Remove(keyword.Length - 3, 3));
}
else if (keyword.EndsWith(" OR"))
{
keyword = string.Format("{0}or", keyword.Remove(keyword.Length - 2, 2));
}
else if (keyword.StartsWith("AND "))
{
keyword = string.Format("and{0}", keyword.Substring(3));
}
else if (keyword.StartsWith("OR "))
{
keyword = string.Format("or{0}", keyword.Substring(2));
}
else if (keyword.Contains(" OR "))
{
keyword = keyword.Replace(" OR ", " or ");
}
else if (keyword.Contains(" AND "))
{
keyword = keyword.Replace(" AND ", " and ");
}
else
isClean = true; // nothing left to neutralize
} }
return QueryParser.Escape(keyword);
}
#endregion AnalyzerKey
lucene.net全文检索(二)lucene.net 的封装的更多相关文章
- 全文检索框架---Lucene
一.什么是全文检索 1.数据分类 我们生活中的数据总体分为两种:结构化数据和非结构化数据. 结构化数据:指具有固定格式或有限长度的数据,如数据库,元数据等. 非结构化数据:指不定长或无固定格式 ...
- JAVAEE——Lucene基础:什么是全文检索、Lucene实现全文检索的流程、配置开发环境、索引库创建与管理
1. 学习计划 第一天:Lucene的基础知识 1.案例分析:什么是全文检索,如何实现全文检索 2.Lucene实现全文检索的流程 a) 创建索引 b) 查询索引 3.配置开发环境 4.创建索引库 5 ...
- Lucene 实例教程(二)
原创作品,允许转载,转载时请务必以超链接形式标明文章 原始出处 .作者信息和本人声明.否则将追究法律责任. 作者: 永恒の_☆ 地址: http://blog.csdn.net/chenghui031 ...
- Lucene 01 - 初步认识全文检索和Lucene
目录 1 搜索简介 1.1 搜索实现方案 1.2 数据查询方法 1.2.1 顺序扫描法 1.2.2 倒排索引法(反向索引) 1.3 搜索技术应用场景 2 Lucene简介 2.1 Lucene是什么 ...
- Lucene的全文检索学习
Lucene的官方网站(Apache的顶级项目):http://lucene.apache.org/ 1.什么是Lucene? Lucene 是 apache 软件基金会的一个子项目,由 Doug C ...
- 全文检索以及Lucene的应用
全文检索 一.什么是全文检索? 就是在检索数据,数据的分类: 在计算机当中,比如说存在磁盘的文本文档,HTML页面,Word文档等等...... 1.结构化数据 格式固定,长度固定,数据类型固定等等, ...
- 基于Lucene的全文检索实践
由于项目的需要,使用到了全文检索技术,这里将前段时间所做的工作进行一个实践总结,方便以后查阅.在实际的工作中,需要灵活的使用lucene里面的查询技术,以达到满足业务要求与搜索性能提升的目的. 一.全 ...
- lucene解决全文检索word2003,word2007的办法
在上一篇文章中 ,lucene只能全文检索word2003,无法检索2007,并且只能加载部分内容,无法加载全文内容.为解决此问题,找到了如下方法 POI 读取word (word 2003 和 wo ...
- 大型运输行业实战_day15_1_全文检索之Lucene
1.引入 全文检索简介: 非结构化数据又一种叫法叫全文数据.从全文数据(文本)中进行检索就叫全文检索. 2.数据库搜索的弊端 案例 : select * from product whe ...
- Lucene学习之二:Lucene的总体架构
本文转载自:http://www.cnblogs.com/forfuture1978/archive/2009/12/14/1623596.html Lucene总的来说是: 一个高效的,可扩展的,全 ...
随机推荐
- tomact
常见的java相关的web服务器软件: *webLogic:oracle公司,大型的JavaEE服务器,支持所有的JavaEE规范,收费. *webSphere:IBM公司,大型的JavaEE ...
- Vue重用组件
1.是什么? 这里主要是简单入门使用一下,复杂高阶的用法笔者暂时还没了解到 Vue重用组件是指可以被多个Vue实例重复使用的组件.这些组件可以包含自定义的状态和事件处理程序,并且可以在整个应用程序中共 ...
- Scrapy-redis组件,实现分布式爬虫
安装包 pip install -U scrapy-redis settings.py ##### Scrapy-Redis ##### ### Scrapy指定Redis 配置 ### # 其他默认 ...
- Python——Html(表格<table>, <tr>,<td>,<th>、表单<form>、自定义标签<div>和<span>)
一.表格<table>, <tr>,<td>或<th> <table> 元素是 HTML 中用于创建表格的主要标记.表格是一种用于展示数据的 ...
- Python——第四章:生成器(Generators)
生成器(generator): 生成器的本质就是迭代器 创建生成器的两种方案: 1. 生成器函数 2. 生成器表达式 生成器函数 生成器函数中有一个关键 ...
- elastic优化
通过定义keyword 的 "null_value" :"NULL",使得搜索是不用单独使用exists查询.统一用terms查询就能查询到想要的结果 利用co ...
- manjaro下使用deepin-wine5解决wechat无法发送图片的问题
问题 在manjaro操作系统下,使用了deepin-wine安装wechat.但是,wechat运行无法发送较大图片且截图功能也有问题. 解决 在参考了github之后,我找到了解决方案. 附上链接 ...
- MySQL的事务(看看也许有帮助呢)
MySQL的事务 一.事务的概念 在MySQL中,只有InnoDB存储引擎才支持事务. 事务的处理用来维护数据库数据的完整性,保证同一个事务里的一批SQL语句,要么全部执行,要么全部不执行. 事务用来 ...
- 跟我学丨如何用鲲鹏服务器搭建Hadoop全分布式集群
摘要:今天教大家如何利用鲲鹏服务器搭建Hadoop全分布式集群,动起来··· 一.Hadoop常见的三种运行模式 1.单机模式(独立模式)(Local或Standalone Mode) 默认情况下Ha ...
- 关于单元测试的那些事儿,Mockito 都能帮你解决
摘要:相信每一个程序猿在写Unit Test的时候都会碰到一些令人头疼的问题:如何测试一个rest接口:如何测试一个包含客户端调用服务端的复杂方法:如何测试一个包含从数据库读取数据的复杂方法...这些 ...