Fixing avro-1.8.1 serialization errors for BigDecimal and Short.
1. Create a MySQL table like:
CREATE TABLE `test` (
`a` tinyint(4) NOT NULL DEFAULT '',
`b` decimal(12,0) DEFAULT NULL,
`c` decimal(5,0) DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci;
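For context: with Debezium's default settings (decimal.handling.mode=precise), tinyint maps to Connect's INT16 type, carried as a java.lang.Short, and decimal(12,0) maps to Connect's Decimal logical type, carried as a java.math.BigDecimal. A minimal sketch of the Connect schema and value this table produces (field names taken from the table above, everything else illustrative):

import java.math.BigDecimal;
import org.apache.kafka.connect.data.Decimal;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;

public class ConnectTypesSketch {
    public static void main(String[] args) {
        // rough shape of the row schema Debezium hands to the Avro converter
        Schema rowSchema = SchemaBuilder.struct()
                .field("a", Schema.INT16_SCHEMA)                   // tinyint -> INT16 -> java.lang.Short
                .field("b", Decimal.builder(0).optional().build()) // decimal(12,0) -> Decimal(scale 0) -> BigDecimal
                .field("c", Decimal.builder(0).optional().build())
                .build();

        Struct row = new Struct(rowSchema)
                .put("a", (short) 1)
                .put("b", new BigDecimal("1"))
                .put("c", new BigDecimal("2"));
        System.out.println(row);
    }
}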
2. Start Kafka Connect with the Debezium MySQL plugin, using this connector config:
{
  "name": "inventory-connector",
  "config": {
    "connector.class": "io.debezium.connector.mysql.MySqlConnector",
    "tasks.max": "1",
    "database.hostname": "localhost",
    "database.port": "3306",
    "database.user": "root",
    "database.password": "root",
    "database.server.id": "223344",
    "database.server.name": "localhost",
    "database.whitelist": "inventory",
    "table.whitelist": "inventory.test",
    "database.history.kafka.bootstrap.servers": "localhost:9092",
    "database.history.kafka.topic": "schema-changes.inventory",
    "include.schema.changes": "false",
    "transforms": "extractField",
    "transforms.extractField.type": "com.centchain.kafka.connect.mysql.DebeziumMysql$Value",
    "transforms.extractField.field": "after"
  }
}
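(Side note: transforms.extractField points at a custom SMT; judging by the "field": "after" setting, it appears to play the same role as the stock org.apache.kafka.connect.transforms.ExtractField$Value transform, i.e. unwrap the Debezium envelope and keep only the after struct.)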
3. The following errors appear:
[-- ::,] INFO WorkerSourceTask{id=cashier--} flushing outstanding messages for offset commit (org.apache.kafka.connect.runtime.WorkerSourceTask:)
[-- ::,] ERROR WorkerSourceTask{id=cashier--} Task threw an uncaught and unrecoverable exception (org.apache.kafka.connect.runtime.WorkerTask:)
org.apache.kafka.connect.errors.ConnectException: Tolerance exceeded in error handler
at org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execAndHandleError(RetryWithToleranceOperator.java:)
at org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execute(RetryWithToleranceOperator.java:)
at org.apache.kafka.connect.runtime.WorkerSourceTask.convertTransformedRecord(WorkerSourceTask.java:)
at org.apache.kafka.connect.runtime.WorkerSourceTask.sendRecords(WorkerSourceTask.java:)
at org.apache.kafka.connect.runtime.WorkerSourceTask.execute(WorkerSourceTask.java:)
at org.apache.kafka.connect.runtime.WorkerTask.doRun(WorkerTask.java:)
at org.apache.kafka.connect.runtime.WorkerTask.run(WorkerTask.java:)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:)
at java.util.concurrent.FutureTask.run(FutureTask.java:)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:)
at java.lang.Thread.run(Thread.java:)
Caused by: org.apache.avro.AvroRuntimeException: Unknown datum class: class java.lang.Short
at org.apache.avro.util.internal.JacksonUtils.toJson(JacksonUtils.java:)
at org.apache.avro.util.internal.JacksonUtils.toJsonNode(JacksonUtils.java:)
at org.apache.avro.Schema$Field.<init>(Schema.java:)
at org.apache.avro.Schema$Field.<init>(Schema.java:)
at io.confluent.connect.avro.AvroData.addAvroRecordField(AvroData.java:)
at io.confluent.connect.avro.AvroData.fromConnectSchema(AvroData.java:)
at io.confluent.connect.avro.AvroData.fromConnectSchema(AvroData.java:)
...
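The failure can be reproduced outside Connect with a couple of lines against stock avro 1.8.1 (a minimal sketch; the tinyint default surfaces as a java.lang.Short, which Avro's JacksonUtils.toJson cannot handle):

import org.apache.avro.Schema;

public class ShortDefaultRepro {
    public static void main(String[] args) {
        // The Avro converter passes the Connect INT16 default (a Short) into this
        // constructor, which converts it via JacksonUtils.toJsonNode -> toJson.
        new Schema.Field("a", Schema.create(Schema.Type.INT), null, Short.valueOf((short) 1));
        // stock 1.8.1 throws: org.apache.avro.AvroRuntimeException:
        //   Unknown datum class: class java.lang.Short
    }
}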
4. Fix
File: avro-release-1.8.1/lang/java/avro/src/main/java/org/apache/avro/util/internal/JacksonUtils.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.util.internal;

import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import org.apache.avro.AvroRuntimeException;
import org.apache.avro.JsonProperties;
import org.apache.avro.Schema;
import org.codehaus.jackson.JsonGenerator;
import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.util.TokenBuffer;

public class JacksonUtils {
static final String BYTES_CHARSET = "ISO-8859-1";

private JacksonUtils() {
}

public static JsonNode toJsonNode(Object datum) {
if (datum == null) {
return null;
}
try {
TokenBuffer generator = new TokenBuffer(new ObjectMapper());
toJson(datum, generator);
return new ObjectMapper().readTree(generator.asParser());
} catch (IOException e) {
throw new AvroRuntimeException(e);
}
}

@SuppressWarnings(value = "unchecked")
static void toJson(Object datum, JsonGenerator generator) throws IOException {
if (datum == JsonProperties.NULL_VALUE) { // null
generator.writeNull();
} else if (datum instanceof Map) { // record, map
generator.writeStartObject();
for (Map.Entry<Object,Object> entry : ((Map<Object,Object>) datum).entrySet()) {
generator.writeFieldName(entry.getKey().toString());
toJson(entry.getValue(), generator);
}
generator.writeEndObject();
} else if (datum instanceof Collection) { // array
generator.writeStartArray();
for (Object element : (Collection<?>) datum) {
toJson(element, generator);
}
generator.writeEndArray();
} else if (datum instanceof byte[]) { // bytes, fixed
generator.writeString(new String((byte[]) datum, BYTES_CHARSET));
} else if (datum instanceof CharSequence || datum instanceof Enum<?>) { // string, enum
generator.writeString(datum.toString());
} else if (datum instanceof Double) { // double
generator.writeNumber((Double) datum);
} else if (datum instanceof Float) { // float
generator.writeNumber((Float) datum);
} else if (datum instanceof Long) { // long
generator.writeNumber((Long) datum);
} else if (datum instanceof Integer) { // int
generator.writeNumber((Integer) datum);
} else if (datum instanceof Short) { // short
generator.writeNumber(((Short) datum).intValue());
} else if (datum instanceof Boolean) { // boolean
generator.writeBoolean((Boolean) datum);
} else if (datum instanceof BigDecimal) {
generator.writeNumber((BigDecimal) datum);
} else {
throw new AvroRuntimeException("Unknown datum class: " + datum.getClass());
}
}

public static Object toObject(JsonNode jsonNode) {
return toObject(jsonNode, null);
}

public static Object toObject(JsonNode jsonNode, Schema schema) {
if (schema != null && schema.getType().equals(Schema.Type.UNION)) {
return toObject(jsonNode, schema.getTypes().get(0));
}
if (jsonNode == null) {
return null;
} else if (jsonNode.isNull()) {
return JsonProperties.NULL_VALUE;
} else if (jsonNode.isBoolean()) {
return jsonNode.asBoolean();
} else if (jsonNode.isInt()) {
if (schema == null || schema.getType().equals(Schema.Type.INT)) {
return jsonNode.asInt();
} else if (schema.getType().equals(Schema.Type.LONG)) {
return jsonNode.asLong();
}
} else if (jsonNode.isBigDecimal()) {
return jsonNode.asDouble();
} else if (jsonNode.isLong()) {
return jsonNode.asLong();
} else if (jsonNode.isDouble()) {
if (schema == null || schema.getType().equals(Schema.Type.DOUBLE)) {
return jsonNode.asDouble();
} else if (schema.getType().equals(Schema.Type.FLOAT)) {
return (float) jsonNode.asDouble();
}
} else if (jsonNode.isTextual()) {
if (schema == null || schema.getType().equals(Schema.Type.STRING) ||
schema.getType().equals(Schema.Type.ENUM)) {
return jsonNode.asText();
} else if (schema.getType().equals(Schema.Type.BYTES)) {
try {
return jsonNode.getTextValue().getBytes(BYTES_CHARSET);
} catch (UnsupportedEncodingException e) {
throw new AvroRuntimeException(e);
}
}
} else if (jsonNode.isArray()) {
List l = new ArrayList();
for (JsonNode node : jsonNode) {
l.add(toObject(node, schema == null ? null : schema.getElementType()));
}
return l;
} else if (jsonNode.isObject()) {
Map m = new LinkedHashMap();
for (Iterator<String> it = jsonNode.getFieldNames(); it.hasNext(); ) {
String key = it.next();
Schema s = null;
if (schema == null) {
s = null;
} else if (schema.getType().equals(Schema.Type.MAP)) {
s = schema.getValueType();
} else if (schema.getType().equals(Schema.Type.RECORD)) {
s = schema.getField(key).schema();
}
Object value = toObject(jsonNode.get(key), s);
m.put(key, value);
}
return m;
}
return null;
}
}
The key changes are the new Short branch in toJson (which writes the value as an int) and the new BigDecimal branches in toJson and toObject.
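A quick way to sanity-check the patched class (a minimal sketch; assumes the rebuilt avro jar is on the classpath):

import java.math.BigDecimal;
import org.apache.avro.util.internal.JacksonUtils;
import org.codehaus.jackson.JsonNode;

public class JacksonUtilsPatchCheck {
    public static void main(String[] args) {
        JsonNode shortNode = JacksonUtils.toJsonNode((short) 5);
        System.out.println(shortNode.getIntValue()); // 5 -- the short is written as an int

        JsonNode decimalNode = JacksonUtils.toJsonNode(new BigDecimal("12"));
        System.out.println(decimalNode);             // numeric node instead of "Unknown datum class"
    }
}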
5. Result
5.1 MySQL -> Kafka
lenmom@M1701:~/workspace/software/confluent-community-5.1.-2.11$ bin/kafka-avro-console-consumer --bootstrap-server 127.0.0.1: --from-beginning --topic localhost.a.test
{"a":,"b":{"bytes":"\u0001"},"c":{"bytes":"\u0001"},"operation_type":"c","pt_log_d":"","last_update_timestamp":}
{"a":,"b":{"bytes":"\u0001"},"c":{"bytes":"\u0002"},"operation_type":"c","pt_log_d":"","last_update_timestamp":}
{"a":,"b":{"bytes":"\u0001"},"c":{"bytes":"\u0003"},"operation_type":"c","pt_log_d":"","last_update_timestamp":}
{"a":,"b":{"bytes":"\u0001"},"c":{"bytes":"\u0004"},"operation_type":"c","pt_log_d":"","last_update_timestamp":}
5.2 Kafka -> Hive
Connector config:
{
  "name": "hive-sink",
  "config": {
    "connector.class": "io.confluent.connect.hdfs.HdfsSinkConnector",
    "tasks.max": "",
    "topics": "localhost.a.test",
    "hdfs.url": "hdfs://127.0.0.1:9000/",
    "logs.dir": "/logs",
    "topics.dir": "/inventory/",
    "hadoop.conf.dir": "/home/lenmom/workspace/software/hadoop-2.7.3/etc/hadoop/",
    "flush.size": "",
    "rotate.interval.ms": "",
    "hive.integration": true,
    "hive.database": "inventory",
    "partitioner.class": "io.confluent.connect.hdfs.partitioner.FieldPartitioner",
    "partition.field.name": "pt_log_d",
    "hive.metastore.uris": "thrift://127.0.0.1:9083",
    "schema.compatibility": "BACKWARD"
  }
}
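Note that FieldPartitioner partitions the output by pt_log_d, which is present in the records shown in 5.1, so each row ends up in a Hive partition keyed by its pt_log_d value.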
result:
hive> select * from localhost_a_test;
OK
c -- ::17.029
c -- ::17.029
c -- ::17.029
c -- ::17.029
Time taken: 0.168 seconds, Fetched: row(s)
6. For a table with the following schema:
CREATE TABLE `decimalTest` (
`POINTSDAY` decimal(12,0) NOT NULL DEFAULT '' ,
`POINTSMONTH` decimal(12,0) NOT NULL DEFAULT '' ,
`CASHDAY` decimal(12,0) NOT NULL DEFAULT '' ,
`CASHMONTH` decimal(12,0) NOT NULL DEFAULT ''
);

insert into decimalTest values(1,2,3);

If we use the HDFS connector to sink this table to Hive, we get an error like:
[-- ::,] ERROR WorkerSinkTask{id=hive-sink-} Task threw an uncaught and unrecoverable exception (org.apache.kafka.connect.runtime.WorkerTask:)
org.apache.kafka.connect.errors.ConnectException: Tolerance exceeded in error handler
at org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execAndHandleError(RetryWithToleranceOperator.java:)
at org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execute(RetryWithToleranceOperator.java:)
at org.apache.kafka.connect.runtime.WorkerSinkTask.convertAndTransformRecord(WorkerSinkTask.java:)
at org.apache.kafka.connect.runtime.WorkerSinkTask.convertMessages(WorkerSinkTask.java:)
at org.apache.kafka.connect.runtime.WorkerSinkTask.poll(WorkerSinkTask.java:)
at org.apache.kafka.connect.runtime.WorkerSinkTask.iteration(WorkerSinkTask.java:)
at org.apache.kafka.connect.runtime.WorkerSinkTask.execute(WorkerSinkTask.java:)
at org.apache.kafka.connect.runtime.WorkerTask.doRun(WorkerTask.java:)
at org.apache.kafka.connect.runtime.WorkerTask.run(WorkerTask.java:)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:)
at java.util.concurrent.FutureTask.run(FutureTask.java:)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:)
at java.lang.Thread.run(Thread.java:)
Caused by: org.apache.kafka.connect.errors.SchemaBuilderException: Invalid default value
at org.apache.kafka.connect.data.SchemaBuilder.defaultValue(SchemaBuilder.java:)
at io.confluent.connect.avro.AvroData.toConnectSchema(AvroData.java:)
at io.confluent.connect.avro.AvroData.toConnectSchema(AvroData.java:)
at io.confluent.connect.avro.AvroData.toConnectSchema(AvroData.java:)
at io.confluent.connect.avro.AvroData.toConnectData(AvroData.java:)
at io.confluent.connect.avro.AvroConverter.toConnectData(AvroConverter.java:)
at org.apache.kafka.connect.runtime.WorkerSinkTask.lambda$convertAndTransformRecord$(WorkerSinkTask.java:)
at org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execAndRetry(RetryWithToleranceOperator.java:)
at org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execAndHandleError(RetryWithToleranceOperator.java:)
... more
Caused by: org.apache.kafka.connect.errors.DataException: Invalid value: null used for required field: "null", schema type: BYTES
at org.apache.kafka.connect.data.ConnectSchema.validateValue(ConnectSchema.java:)
at org.apache.kafka.connect.data.ConnectSchema.validateValue(ConnectSchema.java:)
at org.apache.kafka.connect.data.SchemaBuilder.defaultValue(SchemaBuilder.java:)
... more
[-- ::,] ERROR WorkerSinkTask{id=hive-sink-} Task is being killed and will not recover until manually restarted (org.apache.kafka.connect.runtime.WorkerTask:)
This is caused by the kafka-avro-converter failing to handle the decimal column's default value when converting the schema.
To fix this error, I added the following logic:
6.1 /schema-registry-5.1.0/avro-converter/src/main/java/io/confluent/connect/avro/AvroData.java
private Object defaultValueFromAvro(Schema schema,
org.apache.avro.Schema avroSchema,
Object value,
ToConnectContext toConnectContext) {
// The type will be JsonNode if this default was pulled from a Connect default field, or an
// Object if it's the actual Avro-specified default. If it's a regular Java object, we can
// use our existing conversion tools.
if (!(value instanceof JsonNode)) {
return toConnectData(schema, value, toConnectContext);
}

JsonNode jsonValue = (JsonNode) value;
switch (avroSchema.getType()) {
case INT:
if (schema.type() == Schema.Type.INT8) {
return (byte) jsonValue.getIntValue();
} else if (schema.type() == Schema.Type.INT16) {
return (short) jsonValue.getIntValue();
} else if (schema.type() == Schema.Type.INT32) {
return jsonValue.getIntValue();
} else {
break;
}
case LONG:
return jsonValue.getLongValue();
case FLOAT:
return (float) jsonValue.getDoubleValue();
case DOUBLE:
return jsonValue.getDoubleValue();
case BOOLEAN:
return jsonValue.asBoolean();
case NULL:
return null;
case STRING:
case ENUM:
return jsonValue.asText();
case BYTES:
return jsonValue.getDecimalValue();
case FIXED:
try {
return jsonValue.getBinaryValue();
} catch (IOException e) {
throw new DataException("Invalid binary data in default value");
}
// return convertIntegerToBytes(jsonValue.getIntValue());
// return jsonValue.getIntValue();
case ARRAY: {
if (!jsonValue.isArray()) {
throw new DataException("Invalid JSON for array default value: " + jsonValue.toString());
}
List<Object> result = new ArrayList<>(jsonValue.size());
for (JsonNode elem : jsonValue) {
result.add(
defaultValueFromAvro(schema, avroSchema.getElementType(), elem, toConnectContext));
}
return result;
}
case MAP: {
if (!jsonValue.isObject()) {
throw new DataException("Invalid JSON for map default value: " + jsonValue.toString());
}
Map<String, Object> result = new HashMap<>(jsonValue.size());
Iterator<Map.Entry<String, JsonNode>> fieldIt = jsonValue.getFields();
while (fieldIt.hasNext()) {
Map.Entry<String, JsonNode> field = fieldIt.next();
Object converted = defaultValueFromAvro(
schema, avroSchema.getElementType(), field.getValue(), toConnectContext);
result.put(field.getKey(), converted);
}
return result;
}
case RECORD: {
if (!jsonValue.isObject()) {
throw new DataException("Invalid JSON for record default value: " + jsonValue.toString());
}

Struct result = new Struct(schema);
for (org.apache.avro.Schema.Field avroField : avroSchema.getFields()) {
Field field = schema.field(avroField.name());
JsonNode fieldJson = ((JsonNode) value).get(field.name());
Object converted = defaultValueFromAvro(
field.schema(), avroField.schema(), fieldJson, toConnectContext);
result.put(avroField.name(), converted);
}
return result;
}
case UNION: {
// Defaults must match first type
org.apache.avro.Schema memberAvroSchema = avroSchema.getTypes().get(0);
if (memberAvroSchema.getType() == org.apache.avro.Schema.Type.NULL) {
return null;
} else {
return defaultValueFromAvro(
schema.field(unionMemberFieldName(memberAvroSchema)).schema(),
memberAvroSchema,
value,
toConnectContext);
}
}
default: {
return null;
}
}
return null;
}
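The relevant change is the BYTES case: instead of decoding the default as binary, it now returns jsonValue.getDecimalValue(), i.e. a java.math.BigDecimal. That matches Connect's Decimal logical type (connect.name org.apache.kafka.connect.data.Decimal), whose Java representation is BigDecimal, so SchemaBuilder.defaultValue accepts the value instead of failing with the "Invalid value: null used for required field" error seen above (presumably the stock binary path yielded null for this default).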
After the fix, rebuild the jar and replace kafka-connect-avro-converter-5.1.0.jar in the Confluent Kafka installation directory.
The data can then be sunk to Hive:
hive> select * from decimalTest limit ;
[WARNING] Avro: Invalid default for field POINTSDAY: not a {"type":"bytes","scale":,"precision":,"connect.version":,"connect.parameters":{"scale":"","connect.decimal.precision":""},"connect.default":"AA==","connect.name":"org.apache.kafka.connect.data.Decimal","logicalType":"decimal"}
[WARNING] Avro: Invalid default for field POINTSMONTH: not a {"type":"bytes","scale":,"precision":,"connect.version":,"connect.parameters":{"scale":"","connect.decimal.precision":""},"connect.default":"AA==","connect.name":"org.apache.kafka.connect.data.Decimal","logicalType":"decimal"}
[WARNING] Avro: Invalid default for field CASHDAY: not a {"type":"bytes","scale":,"precision":,"connect.version":,"connect.parameters":{"scale":"","connect.decimal.precision":""},"connect.default":"AA==","connect.name":"org.apache.kafka.connect.data.Decimal","logicalType":"decimal"}
[WARNING] Avro: Invalid default for field CASHMONTH: not a {"type":"bytes","scale":,"precision":,"connect.version":,"connect.parameters":{"scale":"","connect.decimal.precision":""},"connect.default":"AA==","connect.name":"org.apache.kafka.connect.data.Decimal","logicalType":"decimal"}
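As an aside, the "connect.default":"AA==" shown in these warnings is just the base64 encoding of a single 0x00 byte, i.e. an unscaled value of 0, so the intended default is simply BigDecimal 0 at scale 0. A one-liner to confirm:

import java.math.BigDecimal;
import java.math.BigInteger;
import java.util.Base64;

public class ConnectDefaultDecode {
    public static void main(String[] args) {
        byte[] unscaled = Base64.getDecoder().decode("AA==");            // -> { 0x00 }
        System.out.println(new BigDecimal(new BigInteger(unscaled), 0)); // 0
    }
}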
6.2 As we can see above, Hive prints warnings when we query the data. To eliminate the warnings, modify
/avro-release-1.8.1/lang/java/avro/src/main/java/org/apache/avro/Schema.java
private static boolean isValidDefault(Schema schema, JsonNode defaultValue) {
if (defaultValue == null)
return false;
switch (schema.getType()) {
case BYTES:
if (schema.logicalType != null
    && (schema.logicalType.getName().equals("decimal")
        || schema.logicalType.getName().toLowerCase().equals("bigdecimal"))) {
return defaultValue.isBigDecimal();
} else {
return defaultValue.isTextual();
}
case STRING:
case ENUM:
case FIXED:
return defaultValue.isTextual();
case INT:
case LONG:
case FLOAT:
case DOUBLE:
return defaultValue.isNumber();
case BOOLEAN:
return defaultValue.isBoolean();
case NULL:
return defaultValue.isNull();
case ARRAY:
if (!defaultValue.isArray())
return false;
for (JsonNode element : defaultValue)
if (!isValidDefault(schema.getElementType(), element))
return false;
return true;
case MAP:
if (!defaultValue.isObject())
return false;
for (JsonNode value : defaultValue)
if (!isValidDefault(schema.getValueType(), value))
return false;
return true;
case UNION: // union default: first branch
return isValidDefault(schema.getTypes().get(0), defaultValue);
case RECORD:
if (!defaultValue.isObject())
return false;
for (Field field : schema.getFields())
if (!isValidDefault(field.schema(),
defaultValue.has(field.name())
? defaultValue.get(field.name())
: field.defaultValue()))
return false;
return true;
default:
return false;
}
}
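A minimal sketch of the path these two patches cover (assuming both the JacksonUtils change from section 4 and this Schema change are applied): a bytes schema carrying the decimal logical type with a BigDecimal default is serialized through JacksonUtils.toJsonNode and then checked by isValidDefault; with stock 1.8.1 the first step already throws, and without this change the default is reported as invalid.

import java.math.BigDecimal;
import java.util.Arrays;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;

public class DecimalDefaultSketch {
    public static void main(String[] args) {
        // bytes schema with the decimal logical type, mirroring what the Connect
        // Avro converter emits for MySQL decimal(12,0)
        Schema decimalSchema = LogicalTypes.decimal(12, 0)
                .addToSchema(Schema.create(Schema.Type.BYTES));

        // the BigDecimal default goes through JacksonUtils.toJsonNode (patched in section 4)
        // and is validated by Schema.isValidDefault (patched here)
        Schema.Field field = new Schema.Field("POINTSDAY", decimalSchema, null, new BigDecimal("0"));

        Schema record = Schema.createRecord("decimalTest", null, "inventory", false);
        record.setFields(Arrays.asList(field));
        System.out.println(record.toString(true));
    }
}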
After this fix, rebuild the avro jar and replace it in both the $HIVE_HOME and $CONFLUENT_KAFKA_HOME installation directories.