ElasticSearch aggregations
This post walks through Elasticsearch aggregation queries and search targeting with the Java TransportClient:
grouping students by age and summing up each student's total score;
searching several indices at once, including index names with wildcards;
steering a search with preference settings (prefer the local node, query only the local node, query only the node with a given id, or query a specific shard);
speeding up queries by routing related documents onto the same shard.
The full reference code is ESTestAggregation.java, shown below.
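The aggregation tests below assume that the indices djt1 and djt2 already contain user documents with age, name and score fields. As a minimal sketch of that assumption (index, type and field names match the tests, but the helper name seedSampleData and the concrete documents are invented here for illustration; it relies on the same imports as the listing below), the data could be seeded with the TransportClient built in test0:

// Minimal sketch (assumption): seed the sample data the aggregation tests expect.
// Field names match the aggregations below; the documents themselves are made up.
public static void seedSampleData(TransportClient client) {
    client.prepareIndex("djt1", "user").setSource("name", "tom", "age", 16).get();
    client.prepareIndex("djt1", "user").setSource("name", "john", "age", 16).get();
    client.prepareIndex("djt1", "user").setSource("name", "mike", "age", 18).get();
    client.prepareIndex("djt2", "user").setSource("name", "tom", "score", 80).get();
    client.prepareIndex("djt2", "user").setSource("name", "tom", "score", 90).get();
    client.prepareIndex("djt2", "user").setSource("name", "john", "score", 70).get();
}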
package com.dajiangtai.djt_spider.elasticsearch;

import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;

import org.codehaus.jackson.map.ObjectMapper;
import org.elasticsearch.action.bulk.BackoffPolicy;
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket;
import org.elasticsearch.search.aggregations.metrics.sum.Sum;
import org.junit.Before;
import org.junit.Test;
/**
 * Aggregation operations
*
* @author 大讲台
*
*/
public class ESTestAggregation {
private TransportClient client;

@Before
public void test0() throws UnknownHostException {
// enable client.transport.sniff so the client discovers all nodes in the cluster
Settings settings = Settings.settingsBuilder()
.put("cluster.name", "escluster")
.put("client.transport.sniff", true).build();
// on startup
// build the TransportClient
client = TransportClient
.builder()
.settings(settings)
.build()
.addTransportAddress(
new InetSocketTransportAddress(InetAddress
.getByName("master"), 9300))
.addTransportAddress(
new InetSocketTransportAddress(InetAddress
.getByName("slave1"), 9300))
.addTransportAddress(
new InetSocketTransportAddress(InetAddress
.getByName("slave2"), 9300));
}
/**
 * Aggregation: group by age and count the students in each age bucket
* @throws Exception
*/
@Test
public void test1() throws Exception {
SearchRequestBuilder builder = client.prepareSearch("djt1");
builder.setTypes("user")
.setQuery(QueryBuilders.matchAllQuery())
// terms aggregation grouped by age
.addAggregation(AggregationBuilders.terms("by_age").field("age").size(0))
;
SearchResponse searchResponse = builder.get();
// read back the per-age buckets
Terms terms = searchResponse.getAggregations().get("by_age");
List<Bucket> buckets = terms.getBuckets();
for (Bucket bucket : buckets) {
System.out.println(bucket.getKey()+":"+bucket.getDocCount());
}
}

/**
 * Aggregation: group by student name and sum each student's total score
* @throws Exception
*/
@Test
public void test2() throws Exception {
SearchRequestBuilder builder = client.prepareSearch("djt2");
builder.setTypes("user")
.setQuery(QueryBuilders.matchAllQuery())
// terms aggregation grouped by name, with a sum sub-aggregation
.addAggregation(AggregationBuilders.terms("by_name")
.field("name")
.subAggregation(AggregationBuilders.sum("sum_score")
.field("score"))
.size(0))
;
SearchResponse searchResponse = builder.get();
// read back the per-name buckets
Terms terms = searchResponse.getAggregations().get("by_name");
List<Bucket> buckets = terms.getBuckets();
for (Bucket bucket : buckets) {
Sum sum = bucket.getAggregations().get("sum_score");
System.out.println(bucket.getKey()+":"+sum.getValue());
}
}

/**
 * Searching multiple indices and multiple types
* @throws Exception
*/
@Test
public void test3() throws Exception {
SearchRequestBuilder builder
= client//.prepareSearch("djt1","djt2")// several indices can be listed explicitly
.prepareSearch("djt*")// index names may use wildcards
.setTypes("user");// multiple types are supported, but type wildcards are not
SearchResponse searchResponse = builder.get();
SearchHits hits = searchResponse.getHits();
SearchHit[] hits2 = hits.getHits();
for (SearchHit searchHit : hits2) {
System.out.println(searchHit.getSourceAsString());
}
}
/**
 * Shard and node preference query modes
* @throws Exception
*/
@Test
public void test4() throws Exception {
SearchRequestBuilder
builder = client.prepareSearch("djt3")
.setTypes("user")
//.setPreference("_local")
//.setPreference("_only_local")
//.setPreference("_primary")
//.setPreference("_replica")
//.setPreference("_primary_first")
//.setPreference("_replica_first")
//.setPreference("_only_node:crKxtA2fRTG1UZdPN8QtaA")
//.setPreference("_prefer_node:nJL_MqcsSle6gY7iujoAlw")
.setPreference("_shards:3")
;
SearchResponse searchResponse = builder.get();
SearchHits hits = searchResponse.getHits();
SearchHit[] hits2 = hits.getHits();
for (SearchHit searchHit : hits2) {
System.out.println(searchHit.getSourceAsString());
}
}
/**
 * Fast queries: index data with a routing value so documents of the same category land on one shard
* @throws Exception
*/
@Test
public void test5() throws Exception {
Acount acount = new Acount("13602546655","tom1","male",16);
Acount acount2 = new Acount("13602546655","tom2","male",17);
Acount acount3 = new Acount("13602546655","tom3","male",18);
Acount acount4 = new Acount("18903762536","john1","male",28);
Acount acount5 = new Acount("18903762536","john2","male",29);
Acount acount6 = new Acount("18903762536","john3","male",30);
List<Acount> list = new ArrayList<Acount>();
list.add(acount);
list.add(acount2);
list.add(acount3);
list.add(acount4);
list.add(acount5);
list.add(acount6);

BulkProcessor bulkProcessor = BulkProcessor.builder(
client,
new BulkProcessor.Listener() {
public void beforeBulk(long executionId, BulkRequest request) {
// called before each bulk batch is executed
System.out.println(request.numberOfActions());
}

public void afterBulk(long executionId, BulkRequest request,
Throwable failure) {
// called when a whole bulk batch fails
System.out.println(failure.getMessage());
}

public void afterBulk(long executionId, BulkRequest request,
BulkResponse response) {
// called after each bulk batch completes
System.out.println(response.hasFailures());
}
})
.setBulkActions(1000) // maximum number of actions per batch
.setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB))// maximum size of a batch in bytes
.setFlushInterval(TimeValue.timeValueSeconds(5))// flush interval between batches
.setConcurrentRequests(1) // number of concurrent bulk requests
// custom back-off behaviour when one or more bulk requests fail
.setBackoffPolicy(
BackoffPolicy.exponentialBackoff(TimeValue.timeValueMillis(100), 3))
.build();
ObjectMapper mapper = new ObjectMapper();
for (Acount a : list) {
byte[] json = mapper.writeValueAsBytes(a);
bulkProcessor.add(new IndexRequest("djt3", "user")
.routing(a.getPhone().substring(0, 3))
.source(json));
}
// block until all pending requests have been processed, then release resources
bulkProcessor.awaitClose(3, TimeUnit.MINUTES);
client.close();
}
/**
 * Fast queries: search by routing value (a _shards preference query works similarly)
*
* @throws Exception
*/
@Test
public void test6() throws Exception {
SearchRequestBuilder builder = client.prepareSearch("djt3")//可以指定多个索引库
.setTypes("user");//支持多个类型,但不支持通配符
builder.setQuery(QueryBuilders.matchAllQuery())
.setRouting("13602546655".substring(0, 3))
//.setRouting("18903762536".substring(0, 3))
;
SearchResponse searchResponse = builder.get();
SearchHits hits = searchResponse.getHits();
SearchHit[] hits2 = hits.getHits();
for (SearchHit searchHit : hits2) {
System.out.println(searchHit.getSourceAsString());
}
}
}
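The bulk and routing tests (test5 and test6) reference an Acount bean that is not part of this listing. A minimal sketch consistent with the constructor calls and the getPhone() usage above might look like the following; only the four-argument constructor and getPhone() are implied by the code, the remaining field names are assumptions:

// Hypothetical sketch of the Acount bean used by test5/test6.
// Only the constructor signature and getPhone() are implied by the tests above;
// the other field names are assumptions.
public class Acount {
    private String phone;
    private String name;
    private String sex;
    private int age;

    public Acount() {
    }

    public Acount(String phone, String name, String sex, int age) {
        this.phone = phone;
        this.name = name;
        this.sex = sex;
        this.age = age;
    }

    public String getPhone() { return phone; }
    public String getName() { return name; }
    public String getSex() { return sex; }
    public int getAge() { return age; }
}

Jackson's ObjectMapper serializes the bean through its public getters, so no extra annotations are needed for the bulk indexing in test5.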