ES code study
pom.xml
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>cn.edu360.es</groupId>
<artifactId>HelloES</artifactId>
<version>1.0-SNAPSHOT</version>
<properties>
<maven.compiler.source>1.8</maven.compiler.source>
<maven.compiler.target>1.8</maven.compiler.target>
<encoding>UTF-8</encoding>
</properties>
<dependencies>
<!-- Elasticsearch transport client -->
<dependency>
<groupId>org.elasticsearch.client</groupId>
<artifactId>transport</artifactId>
<version>5.4.3</version>
</dependency>
<!-- log4j 2.x, required by the transport client -->
<dependency>
<groupId>org.apache.logging.log4j</groupId>
<artifactId>log4j-api</artifactId>
<version>2.8.2</version>
</dependency>
<dependency>
<groupId>org.apache.logging.log4j</groupId>
<artifactId>log4j-core</artifactId>
<version>2.8.2</version>
</dependency>
<!-- unit testing -->
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.12</version>
</dependency>
</dependencies>
</project>
package cn.edu360.es;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.transport.client.PreBuiltTransportClient;
import java.net.InetAddress;
/**
* Created by zx on 2017/8/15.
*/
public class HelloWorld {
public static void main(String[] args) {
try {
//set the cluster name
Settings settings = Settings.builder()
.put("cluster.name", "my-es")
.build();
//create the client
TransportClient client = new PreBuiltTransportClient(settings).addTransportAddresses(
//Java clients talk to Elasticsearch over the transport port 9300
new InetSocketTransportAddress(InetAddress.getByName("192.168.100.211"), 9300),
new InetSocketTransportAddress(InetAddress.getByName("192.168.100.212"), 9300),
new InetSocketTransportAddress(InetAddress.getByName("192.168.100.213"), 9300));
//fetch the document (.actionGet() is synchronous: it blocks until the response arrives)
GetResponse response = client.prepareGet("news", "fulltext", "1").execute().actionGet();
//print the result
System.out.println(response);
//close the client
client.close();
} catch (Exception e) {
e.printStackTrace();
}
}
}
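The comment in main() notes that actionGet() blocks until the response comes back. The same get request can also be run asynchronously by handing execute() an ActionListener, the pattern used again in the delete-by-query test further down. A minimal sketch that would slot into the try block above (it assumes one extra import, org.elasticsearch.action.ActionListener):

client.prepareGet("news", "fulltext", "1").execute(new ActionListener<GetResponse>() {
    @Override
    public void onResponse(GetResponse response) {
        // called on a transport thread once the document arrives
        System.out.println(response.getSourceAsString());
    }

    @Override
    public void onFailure(Exception e) {
        e.printStackTrace();
    }
});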
package cn.edu360.es;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.bulk.byscroll.BulkByScrollResponse;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.get.MultiGetItemResponse;
import org.elasticsearch.action.get.MultiGetResponse;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.reindex.DeleteByQueryAction;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHitField;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.aggregations.Aggregation;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.Aggregations;
import org.elasticsearch.search.aggregations.bucket.terms.DoubleTerms;
import org.elasticsearch.search.aggregations.bucket.terms.StringTerms;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.avg.InternalAvg;
import org.elasticsearch.search.aggregations.metrics.max.InternalMax;
import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.sum.InternalSum;
import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder;
import org.elasticsearch.transport.client.PreBuiltTransportClient;
import org.junit.Before;
import org.junit.Test;
import java.io.IOException;
import java.net.InetAddress;
import java.util.Date;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.rangeQuery;
/**
* Created by zx on 2017/9/5.
* https://www.elastic.co/guide/en/elasticsearch/client/java-api/5.4/index.html
*/
public class EsCRUD {
private TransportClient client = null;
@Before
public void init() throws Exception {
//set the cluster name
Settings settings = Settings.builder()
.put("cluster.name", "my-es")
//sniffing: discover all ES nodes in the cluster through the nodes listed below
.put("client.transport.sniff", true)
.build();
//create the client
client = new PreBuiltTransportClient(settings).addTransportAddresses(
new InetSocketTransportAddress(InetAddress.getByName("192.168.100.211"), 9300),
new InetSocketTransportAddress(InetAddress.getByName("192.168.100.212"), 9300),
new InetSocketTransportAddress(InetAddress.getByName("192.168.100.213"), 9300));
}
@Test
public void testCreate() throws IOException {
IndexResponse response = client.prepareIndex("gamelog", "users", "1")
.setSource(
jsonBuilder()
.startObject()
.field("username", "老赵")
.field("gender", "male")
.field("birthday", new Date())
.field("fv", 9999)
.field("message", "trying out Elasticsearch")
.endObject()
).get();
}
//get a single document
@Test
public void testGet() throws IOException {
GetResponse response = client.prepareGet("gamelog", "users", "1").get();
System.out.println(response.getSourceAsString());
}
//get multiple documents
@Test
public void testMultiGet() throws IOException {
MultiGetResponse multiGetItemResponses = client.prepareMultiGet()
.add("gamelog", "users", "1")
.add("gamelog", "users", "2", "3")
.add("news", "fulltext", "1")
.get();
for (MultiGetItemResponse itemResponse : multiGetItemResponses) {
GetResponse response = itemResponse.getResponse();
if (response.isExists()) {
String json = response.getSourceAsString();
System.out.println(json);
}
}
}
@Test
public void testUpdate() throws Exception {
UpdateRequest updateRequest = new UpdateRequest();
updateRequest.index("gamelog");
updateRequest.type("users");
updateRequest.id("2");
updateRequest.doc(
jsonBuilder()
.startObject()
.field("fv", 999.9)
.endObject());
client.update(updateRequest).get();
}
@Test
public void testDelete() {
DeleteResponse response = client.prepareDelete("gamelog", "users", "2").get();
System.out.println(response);
}
@Test
public void testDeleteByQuery() {
BulkByScrollResponse response =
DeleteByQueryAction.INSTANCE.newRequestBuilder(client)
//the query that selects the documents to delete
.filter(QueryBuilders.matchQuery("username", "老段"))
//the index to delete from
.source("gamelog")
.get();
long deleted = response.getDeleted();
System.out.println(deleted);
}
//asynchronous delete
@Test
public void testDeleteByQueryAsync() {
DeleteByQueryAction.INSTANCE.newRequestBuilder(client)
.filter(QueryBuilders.matchQuery("gender", "male"))
.source("gamelog")
.execute(new ActionListener<BulkByScrollResponse>() {
@Override
public void onResponse(BulkByScrollResponse response) {
long deleted = response.getDeleted();
System.out.println("数据删除了");
System.out.println(deleted);
}
@Override
public void onFailure(Exception e) {
e.printStackTrace();
}
});
try {
System.out.println("异步删除");
Thread.sleep(10000);
} catch (Exception e) {
e.printStackTrace();
}
}
@Test
public void testRange() {
QueryBuilder qb = rangeQuery("fv")
// [88.99, 10000)
.from(88.99)
.to(10000)
.includeLower(true)
.includeUpper(false);
SearchResponse response = client.prepareSearch("gamelog").setQuery(qb).get();
System.out.println(response);
}
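// Range and full-text conditions can be combined with a bool query. boolQuery() is not used
// elsewhere in this post, so treat this as a sketch; it assumes the gamelog/users data above.
@Test
public void testBoolQuery() {
    QueryBuilder qb = QueryBuilders.boolQuery()
            // must clauses contribute to the relevance score
            .must(QueryBuilders.matchQuery("gender", "male"))
            // filter clauses only restrict the result set, they do not affect scoring
            .filter(rangeQuery("fv").gte(88.99).lt(10000));
    SearchResponse response = client.prepareSearch("gamelog").setQuery(qb).get();
    System.out.println(response);
}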
/**
* curl -XPUT 'http://192.168.5.251:9200/player_info/player/1' -d '{ "name": "curry", "age": 29, "salary": 3500,"team": "war", "position": "pg"}'
* curl -XPUT 'http://192.168.5.251:9200/player_info/player/2' -d '{ "name": "thompson", "age": 26, "salary": 2000,"team": "war", "position": "pg"}'
* curl -XPUT 'http://192.168.5.251:9200/player_info/player/3' -d '{ "name": "irving", "age": 25, "salary": 2000,"team": "cav", "position": "pg"}'
* curl -XPUT 'http://192.168.5.251:9200/player_info/player/4' -d '{ "name": "green", "age": 26, "salary": 2000,"team": "war", "position": "pf"}'
* curl -XPUT 'http://192.168.5.251:9200/player_info/player/5' -d '{ "name": "james", "age": 33, "salary": 4000,"team": "cav", "position": "sf"}'
*/
@Test
public void testAddPlayer() throws IOException {
IndexResponse response = client.prepareIndex("player_info", "player", "1")
.setSource(
jsonBuilder()
.startObject()
.field("name", "James")
.field("age", 33)
.field("salary", 3000)
.field("team", "cav")
.field("position", "sf")
.endObject()
).get();
}
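// The five curl commands above can also be replayed from Java in one round trip with the
// bulk API. A minimal sketch (the bulk API is not used elsewhere in this post; it assumes
// extra imports of org.elasticsearch.action.bulk.BulkRequestBuilder and BulkResponse).
@Test
public void testBulkAddPlayers() throws IOException {
    BulkRequestBuilder bulk = client.prepareBulk();
    // values mirror the curl commands in the comment above
    Object[][] players = {
            {"curry", 29, 3500, "war", "pg"},
            {"thompson", 26, 2000, "war", "pg"},
            {"irving", 25, 2000, "cav", "pg"},
            {"green", 26, 2000, "war", "pf"},
            {"james", 33, 4000, "cav", "sf"}};
    for (int i = 0; i < players.length; i++) {
        bulk.add(client.prepareIndex("player_info", "player", String.valueOf(i + 1))
                .setSource(jsonBuilder()
                        .startObject()
                        .field("name", players[i][0])
                        .field("age", players[i][1])
                        .field("salary", players[i][2])
                        .field("team", players[i][3])
                        .field("position", players[i][4])
                        .endObject()));
    }
    BulkResponse bulkResponse = bulk.get();
    // hasFailures() is true if any individual index operation failed
    System.out.println("bulk failures: " + bulkResponse.hasFailures());
}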
/**
* https://elasticsearch.cn/article/102
*
* select team, count(*) as player_count from player group by team;
*/
@Test
public void testAgg1() {
//specify the index and type
SearchRequestBuilder builder = client.prepareSearch("player_info").setTypes("player");
//group by team; no metric aggregation is attached, so each bucket's doc count serves as the count
TermsAggregationBuilder teamAgg = AggregationBuilders.terms("player_count").field("team");
//add the aggregation to the request
builder.addAggregation(teamAgg);
//execute
SearchResponse response = builder.execute().actionGet();
//System.out.println(response);
//put the returned aggregations into a map
Map<String, Aggregation> aggMap = response.getAggregations().getAsMap();
// Set<String> keys = aggMap.keySet();
// for (String key : keys) {
//     System.out.println(key);
// }
//pull out the terms aggregation by its name
StringTerms terms = (StringTerms) aggMap.get("player_count");
// an equivalent way to iterate the grouped buckets:
// for (Terms.Bucket bucket : terms.getBuckets()) {
//     // the group key (the team name)
//     String team = (String) bucket.getKey();
//     // how many documents fell into this group
//     long count = bucket.getDocCount();
//     System.out.println(team + " " + count);
// }
Iterator<Terms.Bucket> teamBucketIt = terms.getBuckets().iterator();
while (teamBucketIt.hasNext()) {
Terms.Bucket bucket = teamBucketIt.next();
String team = (String) bucket.getKey();
long count = bucket.getDocCount();
System.out.println(team + " " + count);
}
}
/**
* select team, position, count(*) as pos_count from player group by team, position;
*/
@Test
public void testAgg2() {
SearchRequestBuilder builder = client.prepareSearch("player_info").setTypes("player");
//name the aggregations and specify the grouping fields
TermsAggregationBuilder teamAgg = AggregationBuilders.terms("team_name").field("team");
TermsAggregationBuilder posAgg = AggregationBuilders.terms("pos_count").field("position");
//nest the position aggregation inside the team aggregation
builder.addAggregation(teamAgg.subAggregation(posAgg));
//execute the search
SearchResponse response = builder.execute().actionGet();
//put the aggregation results into a map
Map<String, Aggregation> aggMap = response.getAggregations().getAsMap();
//look up the terms aggregation by its name
StringTerms teams = (StringTerms) aggMap.get("team_name");
//loop over the results
for (Terms.Bucket teamBucket : teams.getBuckets()) {
//outer buckets: grouped by team
String team = (String) teamBucket.getKey();
Map<String, Aggregation> subAggMap = teamBucket.getAggregations().getAsMap();
StringTerms positions = (StringTerms) subAggMap.get("pos_count");
//each team has several positions, so iterate the nested buckets as well
for (Terms.Bucket posBucket : positions.getBuckets()) {
//the position name
String pos = (String) posBucket.getKey();
//the number of players in that position
long docCount = posBucket.getDocCount();
//print team, position, count
System.out.println(team + " " + pos + " " + docCount);
}
}
}
/**
* select team, max(age) as max_age from player group by team;
*/
@Test
public void testAgg3() {
SearchRequestBuilder builder = client.prepareSearch("player_info").setTypes("player");
//group by team
TermsAggregationBuilder teamAgg = AggregationBuilders.terms("team_name").field("team");
//take the max age within each group
MaxAggregationBuilder maxAgg = AggregationBuilders.max("max_age").field("age");
//nest the max aggregation inside the terms aggregation
builder.addAggregation(teamAgg.subAggregation(maxAgg));
//run the search
SearchResponse response = builder.execute().actionGet();
Map<String, Aggregation> aggMap = response.getAggregations().getAsMap();
//look up the terms aggregation by its name
StringTerms teams = (StringTerms) aggMap.get("team_name");
for (Terms.Bucket teamBucket : teams.getBuckets()) {
//the group key (the team name)
String team = (String) teamBucket.getKey();
//pull the nested aggregations of this bucket into a map
Map<String, Aggregation> subAggMap = teamBucket.getAggregations().getAsMap();
//read the max value for this group
InternalMax ages = (InternalMax)subAggMap.get("max_age");
double max = ages.getValue();
System.out.println(team + " " + max);
}
}
/**
* select team, avg(age) as avg_age, sum(salary) as total_salary from player group by team;
*/
@Test
public void testAgg4() {
SearchRequestBuilder builder = client.prepareSearch("player_info").setTypes("player");
//specify the grouping field
TermsAggregationBuilder termsAgg = AggregationBuilders.terms("team_name").field("team");
//first metric: average age
AvgAggregationBuilder avgAgg = AggregationBuilders.avg("avg_age").field("age");
//second metric: total salary
SumAggregationBuilder sumAgg = AggregationBuilders.sum("total_salary").field("salary");
//attach both metrics to the terms aggregation
builder.addAggregation(termsAgg.subAggregation(avgAgg).subAggregation(sumAgg));
SearchResponse response = builder.execute().actionGet();
Map<String, Aggregation> aggMap = response.getAggregations().getAsMap();
//look up the terms aggregation by its name
StringTerms teams = (StringTerms) aggMap.get("team_name");
for (Terms.Bucket teamBucket : teams.getBuckets()) {
//the team name
String team = (String) teamBucket.getKey();
Map<String, Aggregation> subAggMap = teamBucket.getAggregations().getAsMap();
//read the average age by its alias
InternalAvg avgAge = (InternalAvg)subAggMap.get("avg_age");
//read the total salary by its alias
InternalSum totalSalary = (InternalSum)subAggMap.get("total_salary");
double avgAgeValue = avgAge.getValue();
double totalSalaryValue = totalSalary.getValue();
System.out.println(team + " " + avgAgeValue + " " + totalSalaryValue);
}
}
/**
* select team, sum(salary) as total_salary from player group by team order by total_salary desc;
*/
@Test
public void testAgg5() {
SearchRequestBuilder builder = client.prepareSearch("player_info").setTypes("player");
//group by team and order the buckets by the nested total_salary aggregation (descending, to match the SQL above)
TermsAggregationBuilder termsAgg = AggregationBuilders.terms("team_name").field("team").order(Terms.Order.aggregation("total_salary", false));
SumAggregationBuilder sumAgg = AggregationBuilders.sum("total_salary").field("salary");
builder.addAggregation(termsAgg.subAggregation(sumAgg));
SearchResponse response = builder.execute().actionGet();
Map<String, Aggregation> aggMap = response.getAggregations().getAsMap();
StringTerms teams = (StringTerms) aggMap.get("team_name");
for (Terms.Bucket teamBucket : teams.getBuckets()) {
String team = (String) teamBucket.getKey();
Map<String, Aggregation> subAggMap = teamBucket.getAggregations().getAsMap();
InternalSum totalSalary = (InternalSum)subAggMap.get("total_salary");
double totalSalaryValue = totalSalary.getValue();
System.out.println(team + " " + totalSalaryValue);
}
}
}
package cn.edu360.es;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
import org.elasticsearch.client.AdminClient;
import org.elasticsearch.client.IndicesAdminClient;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.transport.client.PreBuiltTransportClient;
import org.junit.Before;
import org.junit.Test;
import java.io.IOException;
import java.net.InetAddress;
import java.util.HashMap;
/**
* Created by zx on 2017/9/6.
*/
public class AdminAPI {
private TransportClient client = null;
//runs before every test method
@Before
public void init() throws Exception {
//set the cluster name
Settings settings = Settings.builder().put("cluster.name", "my-es").build();
//create the client
client = new PreBuiltTransportClient(settings).addTransportAddresses(
new InetSocketTransportAddress(InetAddress.getByName("192.168.100.211"), 9300),
new InetSocketTransportAddress(InetAddress.getByName("192.168.100.212"), 9300),
new InetSocketTransportAddress(InetAddress.getByName("192.168.100.213"), 9300));
}
//create an index with custom settings
@Test
public void createIndexWithSettings() {
//get the admin API
AdminClient admin = client.admin();
//the indices admin client manages indices
IndicesAdminClient indices = admin.indices();
//prepare to create the index
indices.prepareCreate("gamelog")
//configure index settings
.setSettings(
//settings builder
Settings.builder()//number of primary shards
.put("index.number_of_shards", 4)
//number of replicas (this excludes the primary: with 2 replicas, each shard is stored 3 times)
.put("index.number_of_replicas", 2)
)
//actually execute
.get();
}
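// The admin API can also drop an index, which is handy when re-running these tests.
// A minimal sketch; it removes the gamelog index created above together with all its documents.
@Test
public void deleteIndex() {
    client.admin().indices().prepareDelete("gamelog").get();
}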
//add mapping to an index (like adding a schema to a table)
@Test
public void putMapping() {
//create the index
client.admin().indices().prepareCreate("twitter")
//create a type and declare its field names and types
.addMapping("tweet",
"{\n" +
" \"tweet\": {\n" +
" \"properties\": {\n" +
" \"message\": {\n" +
" \"type\": \"string\"\n" +
" }\n" +
" }\n" +
" }\n" +
" }")
.get();
}
/**
* The dynamic setting controls how unmapped fields are handled. It accepts:
* true: the default; new fields are added to the mapping dynamically
* false: new fields are ignored
* strict: an exception is thrown when an unknown field is encountered
* @throws IOException
*/
@Test
public void testSettingsMappings() throws IOException {
//1:settings
HashMap<String, Object> settings_map = new HashMap<String, Object>(2);
settings_map.put("number_of_shards", 3);
settings_map.put("number_of_replicas", 2);
//2: mappings (the schema)
XContentBuilder builder = XContentFactory.jsonBuilder()
.startObject()
.field("dynamic", "true")
//declare the fields of the type
.startObject("properties")
//the num field
.startObject("num")
//integer type
.field("type", "integer")
//indexed but not tokenized
.field("index", "not_analyzed")
//stored in the document
.field("store", "yes")
.endObject()
//the name field
.startObject("name")
//string type
.field("type", "string")
//stored in the document
.field("store", "yes")
//tokenized and indexed
.field("index", "analyzed")
//tokenized with the ik_smart analyzer
.field("analyzer", "ik_smart")
.endObject()
.endObject()
.endObject();
CreateIndexRequestBuilder prepareCreate = client.admin().indices().prepareCreate("user_info");
//create the index (user_info) and attach the type (user) mapping
prepareCreate.setSettings(settings_map).addMapping("user", builder).get();
}
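// For comparison, with "dynamic" set to "strict" the index rejects documents that carry
// fields missing from the mapping instead of silently adding them. A sketch under an
// assumed index name (user_info_strict); everything else mirrors the method above.
@Test
public void testStrictMapping() throws IOException {
    XContentBuilder strictMapping = XContentFactory.jsonBuilder()
            .startObject()
            // indexing a document with an unknown field now fails with an exception
            .field("dynamic", "strict")
            .startObject("properties")
            .startObject("num").field("type", "integer").endObject()
            .startObject("name").field("type", "string").field("analyzer", "ik_smart").endObject()
            .endObject()
            .endObject();
    client.admin().indices().prepareCreate("user_info_strict")
            .addMapping("user", strictMapping)
            .get();
}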
/**
* XContentBuilder mapping = jsonBuilder()
.startObject()
.startObject("productIndex")
.startObject("properties")
.startObject("title").field("type", "string").field("store", "yes").endObject()
.startObject("description").field("type", "string").field("index", "not_analyzed").endObject()
.startObject("price").field("type", "double").endObject()
.startObject("onSale").field("type", "boolean").endObject()
.startObject("type").field("type", "integer").endObject()
.startObject("createDate").field("type", "date").endObject()
.endObject()
.endObject()
.endObject();
PutMappingRequest mappingRequest = Requests.putMappingRequest("productIndex").type("productIndex").source(mapping);
client.admin().indices().putMapping(mappingRequest).actionGet();
*/
/**
* The index property: no means the field is not indexed at all,
* not_analyzed means indexed but not tokenized,
* analyzed means tokenized and indexed.
* expected [no], [not_analyzed] or [analyzed]
* @throws IOException
*/
@Test
public void testSettingsPlayerMappings() throws IOException {
//1:settings
HashMap<String, Object> settings_map = new HashMap<String, Object>(2);
settings_map.put("number_of_shards", 3);
settings_map.put("number_of_replicas", 1);
//2:mappings
XContentBuilder builder = XContentFactory.jsonBuilder()
.startObject()//
.field("dynamic", "true")
.startObject("properties")
.startObject("id")
.field("type", "integer")
.field("store", "yes")
.endObject()
.startObject("name")
.field("type", "string")
.field("index", "not_analyzed")
.endObject()
.startObject("age")
.field("type", "integer")
.endObject()
.startObject("salary")
.field("type", "integer")
.endObject()
.startObject("team")
.field("type", "string")
.field("index", "not_analyzed")
.endObject()
.startObject("position")
.field("type", "string")
.field("index", "not_analyzed")
.endObject()
.startObject("description")
.field("type", "string")
.field("store", "no")
.field("index", "analyzed")
.field("analyzer", "ik_smart")
.endObject()
.startObject("addr")
.field("type", "string")
.field("store", "yes")
.field("index", "analyzed")
.field("analyzer", "ik_smart")
.endObject()
.endObject()
.endObject();
CreateIndexRequestBuilder prepareCreate = client.admin().indices().prepareCreate("player_info");
prepareCreate.setSettings(settings_map).addMapping("player", builder).get();
}
}