Fix for avro-1.8.1 serialization errors with BigDecimal and Short.
1. create mysql table like
CREATE TABLE `test` (
`a` tinyint(4) NOT NULL DEFAULT '',
`b` decimal(12,0) DEFAULT NULL,
`c` decimal(5,0) DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci;
2. start kafka connect using Debezium mysql plugin
{
"name": "inventory-connector",
"config": {
"connector.class": "io.debezium.connector.mysql.MySqlConnector",
"tasks.max": "1",
"database.hostname": "localhost",
"database.port": "3306",
"database.user": "root",
"database.password": "root",
"database.server.id": "223344",
"database.server.name": "localhost",
"database.whitelist": "inventory",
"table.whitelist":"inventory.test",
"database.history.kafka.bootstrap.servers": "localhost:9092",
"database.history.kafka.topic": "schema-changes.inventory",
"include.schema.changes":"false",
"transforms": "extractField",
"transforms.extractField.type": "com.centchain.kafka.connect.mysql.DebeziumMysql$Value",
"transforms.extractField.field": "after"
}
}
3. get errors:
[-- ::,] INFO WorkerSourceTask{id=cashier--} flushing outstanding messages for offset commit (org.apache.kafka.connect.runtime.WorkerSourceTask:)
[-- ::,] ERROR WorkerSourceTask{id=cashier--} Task threw an uncaught and unrecoverable exception (org.apache.kafka.connect.runtime.WorkerTask:)
org.apache.kafka.connect.errors.ConnectException: Tolerance exceeded in error handler
at org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execAndHandleError(RetryWithToleranceOperator.java:)
at org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execute(RetryWithToleranceOperator.java:)
at org.apache.kafka.connect.runtime.WorkerSourceTask.convertTransformedRecord(WorkerSourceTask.java:)
at org.apache.kafka.connect.runtime.WorkerSourceTask.sendRecords(WorkerSourceTask.java:)
at org.apache.kafka.connect.runtime.WorkerSourceTask.execute(WorkerSourceTask.java:)
at org.apache.kafka.connect.runtime.WorkerTask.doRun(WorkerTask.java:)
at org.apache.kafka.connect.runtime.WorkerTask.run(WorkerTask.java:)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:)
at java.util.concurrent.FutureTask.run(FutureTask.java:)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:)
at java.lang.Thread.run(Thread.java:)
Caused by: org.apache.avro.AvroRuntimeException: Unknown datum class: class java.lang.Short
at org.apache.avro.util.internal.JacksonUtils.toJson(JacksonUtils.java:)
at org.apache.avro.util.internal.JacksonUtils.toJsonNode(JacksonUtils.java:)
at org.apache.avro.Schema$Field.<init>(Schema.java:)
at org.apache.avro.Schema$Field.<init>(Schema.java:)
at io.confluent.connect.avro.AvroData.addAvroRecordField(AvroData.java:)
at io.confluent.connect.avro.AvroData.fromConnectSchema(AvroData.java:)
at io.confluent.connect.avro.AvroData.fromConnectSchema(AvroData.java:) ...
4. fix
file location: avro-release-1.8.1/lang/java/avro/src/main/java/org/apache/avro/util/internal/JacksonUtils.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.util.internal; import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import org.apache.avro.AvroRuntimeException;
import org.apache.avro.JsonProperties;
import org.apache.avro.Schema;
import org.codehaus.jackson.JsonGenerator;
import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.util.TokenBuffer;

/**
 * Internal helpers for converting between Java datum objects and Jackson
 * {@code JsonNode} trees. Avro uses these to round-trip schema properties
 * and field default values through JSON.
 *
 * <p>Patched relative to stock avro-1.8.1 to also handle {@code Short}
 * (written as a JSON int) and {@code BigDecimal} (written as a JSON number),
 * which otherwise fail with "Unknown datum class".
 */
public class JacksonUtils {

  // ISO-8859-1 maps every byte 0-255 to exactly one char, so byte[] values
  // can be carried losslessly inside a JSON string.
  static final String BYTES_CHARSET = "ISO-8859-1";

  private JacksonUtils() {
  }

  /**
   * Converts a Java datum into a Jackson tree node.
   *
   * @param datum the value to convert; may be null
   * @return the equivalent {@code JsonNode}, or null when {@code datum} is null
   * @throws AvroRuntimeException if the datum cannot be serialized
   */
  public static JsonNode toJsonNode(Object datum) {
    if (datum == null) {
      return null;
    }
    try {
      TokenBuffer generator = new TokenBuffer(new ObjectMapper());
      toJson(datum, generator);
      return new ObjectMapper().readTree(generator.asParser());
    } catch (IOException e) {
      throw new AvroRuntimeException(e);
    }
  }

  /**
   * Writes a single datum to the given JSON generator, recursing into maps
   * (records) and collections (arrays).
   *
   * @throws AvroRuntimeException for datum classes with no JSON mapping
   * @throws IOException if the generator fails to write
   */
  @SuppressWarnings(value = "unchecked")
  static void toJson(Object datum, JsonGenerator generator) throws IOException {
    if (datum == JsonProperties.NULL_VALUE) { // null
      generator.writeNull();
    } else if (datum instanceof Map) { // record, map
      generator.writeStartObject();
      for (Map.Entry<Object, Object> entry : ((Map<Object, Object>) datum).entrySet()) {
        generator.writeFieldName(entry.getKey().toString());
        toJson(entry.getValue(), generator);
      }
      generator.writeEndObject();
    } else if (datum instanceof Collection) { // array
      generator.writeStartArray();
      for (Object element : (Collection<?>) datum) {
        toJson(element, generator);
      }
      generator.writeEndArray();
    } else if (datum instanceof byte[]) { // bytes, fixed
      generator.writeString(new String((byte[]) datum, BYTES_CHARSET));
    } else if (datum instanceof CharSequence || datum instanceof Enum<?>) { // string, enum
      generator.writeString(datum.toString());
    } else if (datum instanceof Double) { // double
      generator.writeNumber((Double) datum);
    } else if (datum instanceof Float) { // float
      generator.writeNumber((Float) datum);
    } else if (datum instanceof Long) { // long
      generator.writeNumber((Long) datum);
    } else if (datum instanceof Integer) { // int
      generator.writeNumber((Integer) datum);
    } else if (datum instanceof Short) { // short
      // Widen to int directly instead of round-tripping through a String
      // with the deprecated Integer(String) constructor.
      generator.writeNumber(((Short) datum).intValue());
    } else if (datum instanceof Boolean) { // boolean
      generator.writeBoolean((Boolean) datum);
    } else if (datum instanceof BigDecimal) { // decimal
      generator.writeNumber((BigDecimal) datum);
    } else {
      throw new AvroRuntimeException("Unknown datum class: " + datum.getClass());
    }
  }

  public static Object toObject(JsonNode jsonNode) {
    return toObject(jsonNode, null);
  }

  /**
   * Converts a Jackson tree node back into a Java datum, optionally guided by
   * an Avro schema (used to pick e.g. int vs long, string vs bytes).
   *
   * @param jsonNode the node to convert; may be null
   * @param schema   the expected Avro schema, or null to infer from the node
   * @return the Java value, {@code JsonProperties.NULL_VALUE} for JSON null,
   *         or null when the node has no supported mapping
   */
  public static Object toObject(JsonNode jsonNode, Schema schema) {
    if (schema != null && schema.getType().equals(Schema.Type.UNION)) {
      // A union default must match the first branch of the union.
      return toObject(jsonNode, schema.getTypes().get(0));
    }
    if (jsonNode == null) {
      return null;
    } else if (jsonNode.isNull()) {
      return JsonProperties.NULL_VALUE;
    } else if (jsonNode.isBoolean()) {
      return jsonNode.asBoolean();
    } else if (jsonNode.isInt()) {
      if (schema == null || schema.getType().equals(Schema.Type.INT)) {
        return jsonNode.asInt();
      } else if (schema.getType().equals(Schema.Type.LONG)) {
        return jsonNode.asLong();
      }
    } else if (jsonNode.isBigDecimal()) {
      // NOTE(review): asDouble() can lose precision for large decimals; kept
      // as-is to preserve the existing contract of returning a Double here.
      return jsonNode.asDouble();
    } else if (jsonNode.isLong()) {
      return jsonNode.asLong();
    } else if (jsonNode.isDouble()) {
      if (schema == null || schema.getType().equals(Schema.Type.DOUBLE)) {
        return jsonNode.asDouble();
      } else if (schema.getType().equals(Schema.Type.FLOAT)) {
        return (float) jsonNode.asDouble();
      }
    } else if (jsonNode.isTextual()) {
      if (schema == null || schema.getType().equals(Schema.Type.STRING)
          || schema.getType().equals(Schema.Type.ENUM)) {
        return jsonNode.asText();
      } else if (schema.getType().equals(Schema.Type.BYTES)) {
        try {
          return jsonNode.getTextValue().getBytes(BYTES_CHARSET);
        } catch (UnsupportedEncodingException e) {
          throw new AvroRuntimeException(e);
        }
      }
    } else if (jsonNode.isArray()) {
      List l = new ArrayList();
      for (JsonNode node : jsonNode) {
        l.add(toObject(node, schema == null ? null : schema.getElementType()));
      }
      return l;
    } else if (jsonNode.isObject()) {
      Map m = new LinkedHashMap();
      for (Iterator<String> it = jsonNode.getFieldNames(); it.hasNext(); ) {
        String key = it.next();
        Schema s = null;
        if (schema == null) {
          s = null;
        } else if (schema.getType().equals(Schema.Type.MAP)) {
          s = schema.getValueType();
        } else if (schema.getType().equals(Schema.Type.RECORD)) {
          s = schema.getField(key).schema();
        }
        Object value = toObject(jsonNode.get(key), s);
        m.put(key, value);
      }
      return m;
    }
    return null;
  }
}
The key changes are:
- the `datum instanceof Short` branch in toJson, which fixes the serialization error for Short values;
- the BigDecimal branches in toJson and toObject, which fix the serialization error for BigDecimal values.
5. result:
5.1 mysql -> kafka
lenmom@M1701:~/workspace/software/confluent-community-5.1.-2.11$ bin/kafka-avro-console-consumer --bootstrap-server 127.0.0.1: --from-beginning --topic localhost.a.test
{"a":,"b":{"bytes":"\u0001"},"c":{"bytes":"\u0001"},"operation_type":"c","pt_log_d":"","last_update_timestamp":}
{"a":,"b":{"bytes":"\u0001"},"c":{"bytes":"\u0002"},"operation_type":"c","pt_log_d":"","last_update_timestamp":}
{"a":,"b":{"bytes":"\u0001"},"c":{"bytes":"\u0003"},"operation_type":"c","pt_log_d":"","last_update_timestamp":}
{"a":,"b":{"bytes":"\u0001"},"c":{"bytes":"\u0004"},"operation_type":"c","pt_log_d":"","last_update_timestamp":}
5.2 kafka-hive
command config for connector:
{
"name": "hive-sink",
"config": {
"connector.class": "io.confluent.connect.hdfs.HdfsSinkConnector",
"tasks.max": "",
"topics": "localhost.a.test",
"hdfs.url": "hdfs://127.0.0.1:9000/",
"logs.dir": "/logs",
"topics.dir": "/inventory/",
"hadoop.conf.dir": "/home/lenmom/workspace/software/hadoop-2.7.3/etc/hadoop/",
"flush.size": "",
"rotate.interval.ms": "",
"hive.integration": true,
"hive.database": "inventory",
"partitioner.class":"io.confluent.connect.hdfs.partitioner.FieldPartitioner",
"partition.field.name":"pt_log_d",
"hive.metastore.uris": "thrift://127.0.0.1:9083",
"schema.compatibility": "BACKWARD"
}
}
result:
hive> select * from localhost_a_test;
OK
c -- ::17.029
c -- ::17.029
c -- ::17.029
c -- ::17.029
Time taken: 0.168 seconds, Fetched: row(s)
6. for table schema
CREATE TABLE `decimalTest` (
`POINTSDAY` decimal(12,0) NOT NULL DEFAULT '' ,
`POINTSMONTH` decimal(12,0) NOT NULL DEFAULT '' ,
`CASHDAY` decimal(12,0) NOT NULL DEFAULT '' ,
`CASHMONTH` decimal(12,0) NOT NULL DEFAULT ''
);
insert into decimalTest values (1, 2, 3, 4);
if we use hdfs-connector to sink to hive, we would get error like
[-- ::,] ERROR WorkerSinkTask{id=hive-sink-} Task threw an uncaught and unrecoverable exception (org.apache.kafka.connect.runtime.WorkerTask:)
org.apache.kafka.connect.errors.ConnectException: Tolerance exceeded in error handler
at org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execAndHandleError(RetryWithToleranceOperator.java:)
at org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execute(RetryWithToleranceOperator.java:)
at org.apache.kafka.connect.runtime.WorkerSinkTask.convertAndTransformRecord(WorkerSinkTask.java:)
at org.apache.kafka.connect.runtime.WorkerSinkTask.convertMessages(WorkerSinkTask.java:)
at org.apache.kafka.connect.runtime.WorkerSinkTask.poll(WorkerSinkTask.java:)
at org.apache.kafka.connect.runtime.WorkerSinkTask.iteration(WorkerSinkTask.java:)
at org.apache.kafka.connect.runtime.WorkerSinkTask.execute(WorkerSinkTask.java:)
at org.apache.kafka.connect.runtime.WorkerTask.doRun(WorkerTask.java:)
at org.apache.kafka.connect.runtime.WorkerTask.run(WorkerTask.java:)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:)
at java.util.concurrent.FutureTask.run(FutureTask.java:)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:)
at java.lang.Thread.run(Thread.java:)
Caused by: org.apache.kafka.connect.errors.SchemaBuilderException: Invalid default value
at org.apache.kafka.connect.data.SchemaBuilder.defaultValue(SchemaBuilder.java:)
at io.confluent.connect.avro.AvroData.toConnectSchema(AvroData.java:)
at io.confluent.connect.avro.AvroData.toConnectSchema(AvroData.java:)
at io.confluent.connect.avro.AvroData.toConnectSchema(AvroData.java:)
at io.confluent.connect.avro.AvroData.toConnectData(AvroData.java:)
at io.confluent.connect.avro.AvroConverter.toConnectData(AvroConverter.java:)
at org.apache.kafka.connect.runtime.WorkerSinkTask.lambda$convertAndTransformRecord$(WorkerSinkTask.java:)
at org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execAndRetry(RetryWithToleranceOperator.java:)
at org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execAndHandleError(RetryWithToleranceOperator.java:)
... more
Caused by: org.apache.kafka.connect.errors.DataException: Invalid value: null used for required field: "null", schema type: BYTES
at org.apache.kafka.connect.data.ConnectSchema.validateValue(ConnectSchema.java:)
at org.apache.kafka.connect.data.ConnectSchema.validateValue(ConnectSchema.java:)
at org.apache.kafka.connect.data.SchemaBuilder.defaultValue(SchemaBuilder.java:)
... more
[-- ::,] ERROR WorkerSinkTask{id=hive-sink-} Task is being killed and will not recover until manually restarted (org.apache.kafka.connect.runtime.WorkerTask:)
This error is caused by the kafka-avro-converter failing to deserialize the decimal default values declared in the schema.
To fix this error, I added logic
6.1 /schema-registry-5.1.0/avro-converter/src/main/java/io/confluent/connect/avro/AvroData.java
private Object defaultValueFromAvro(Schema schema,
org.apache.avro.Schema avroSchema,
Object value,
ToConnectContext toConnectContext) {
// The type will be JsonNode if this default was pulled from a Connect default field, or an
// Object if it's the actual Avro-specified default. If it's a regular Java object, we can
// use our existing conversion tools.
if (!(value instanceof JsonNode)) {
return toConnectData(schema, value, toConnectContext);
} JsonNode jsonValue = (JsonNode) value;
switch (avroSchema.getType()) {
case INT:
if (schema.type() == Schema.Type.INT8) {
return (byte) jsonValue.getIntValue();
} else if (schema.type() == Schema.Type.INT16) {
return (short) jsonValue.getIntValue();
} else if (schema.type() == Schema.Type.INT32) {
return jsonValue.getIntValue();
} else {
break;
} case LONG:
return jsonValue.getLongValue(); case FLOAT:
return (float) jsonValue.getDoubleValue();
case DOUBLE:
return jsonValue.getDoubleValue(); case BOOLEAN:
return jsonValue.asBoolean(); case NULL:
return null; case STRING:
case ENUM:
return jsonValue.asText(); 43 case BYTES:
44 return jsonValue.getDecimalValue();
45 case FIXED:
46 try {
47 return jsonValue.getBinaryValue();
48 } catch (IOException e) {
49 throw new DataException("Invalid binary data in default value");
50 }
// return convertIntegerToBytes(jsonValue.getIntValue());
// return jsonValue.getIntValue(); case ARRAY: {
if (!jsonValue.isArray()) {
throw new DataException("Invalid JSON for array default value: " + jsonValue.toString());
}
List<Object> result = new ArrayList<>(jsonValue.size());
for (JsonNode elem : jsonValue) {
result.add(
defaultValueFromAvro(schema, avroSchema.getElementType(), elem, toConnectContext));
}
return result;
} case MAP: {
if (!jsonValue.isObject()) {
throw new DataException("Invalid JSON for map default value: " + jsonValue.toString());
}
Map<String, Object> result = new HashMap<>(jsonValue.size());
Iterator<Map.Entry<String, JsonNode>> fieldIt = jsonValue.getFields();
while (fieldIt.hasNext()) {
Map.Entry<String, JsonNode> field = fieldIt.next();
Object converted = defaultValueFromAvro(
schema, avroSchema.getElementType(), field.getValue(), toConnectContext);
result.put(field.getKey(), converted);
}
return result;
} case RECORD: {
if (!jsonValue.isObject()) {
throw new DataException("Invalid JSON for record default value: " + jsonValue.toString());
} Struct result = new Struct(schema);
for (org.apache.avro.Schema.Field avroField : avroSchema.getFields()) {
Field field = schema.field(avroField.name());
JsonNode fieldJson = ((JsonNode) value).get(field.name());
Object converted = defaultValueFromAvro(
field.schema(), avroField.schema(), fieldJson, toConnectContext);
result.put(avroField.name(), converted);
}
return result;
} case UNION: {
// Defaults must match first type
org.apache.avro.Schema memberAvroSchema = avroSchema.getTypes().get();
if (memberAvroSchema.getType() == org.apache.avro.Schema.Type.NULL) {
return null;
} else {
return defaultValueFromAvro(
schema.field(unionMemberFieldName(memberAvroSchema)).schema(),
memberAvroSchema,
value,
toConnectContext);
}
}
default: {
return null;
}
}
return null;
}
after the fix, rebuild the jar and replace the file kafka-connect-avro-converter-5.1.0.jar in confluent kafka installation dir.
then the data should be able to sink to hive now.
hive> select * from decimalTest limit ;
[WARNING] Avro: Invalid default for field POINTSDAY: not a {"type":"bytes","scale":,"precision":,"connect.version":,"connect.parameters":{"scale":"","connect.decimal.precision":""},"connect.default":"AA==","connect.name":"org.apache.kafka.connect.data.Decimal","logicalType":"decimal"}
[WARNING] Avro: Invalid default for field POINTSMONTH: not a {"type":"bytes","scale":,"precision":,"connect.version":,"connect.parameters":{"scale":"","connect.decimal.precision":""},"connect.default":"AA==","connect.name":"org.apache.kafka.connect.data.Decimal","logicalType":"decimal"}
[WARNING] Avro: Invalid default for field CASHDAY: not a {"type":"bytes","scale":,"precision":,"connect.version":,"connect.parameters":{"scale":"","connect.decimal.precision":""},"connect.default":"AA==","connect.name":"org.apache.kafka.connect.data.Decimal","logicalType":"decimal"}
[WARNING] Avro: Invalid default for field CASHMONTH: not a {"type":"bytes","scale":,"precision":,"connect.version":,"connect.parameters":{"scale":"","connect.decimal.precision":""},"connect.default":"AA==","connect.name":"org.apache.kafka.connect.data.Decimal","logicalType":"decimal"}
6.2 As we can see above, warnings are printed when we query the data in Hive. To eliminate the warnings, modify:
/avro-release-1.8.1/lang/java/avro/src/main/java/org/apache/avro/Schema.java
private static boolean isValidDefault(Schema schema, JsonNode defaultValue) {
if (defaultValue == null)
return false;
switch (schema.getType()) {
case BYTES:
7 if (schema.logicalType.getName().equals("decimal")||
8 schema.logicalType.getName().toLowerCase().equals("bigdecimal")){
9 return defaultValue.isBigDecimal();
10 }
11 else{
12 return defaultValue.isTextual();
13 }
case STRING:
case ENUM:
case FIXED:
17 return defaultValue.isTextual();
case INT:
case LONG:
case FLOAT:
case DOUBLE:
return defaultValue.isNumber();
case BOOLEAN:
return defaultValue.isBoolean();
case NULL:
return defaultValue.isNull();
case ARRAY:
if (!defaultValue.isArray())
return false;
for (JsonNode element : defaultValue)
if (!isValidDefault(schema.getElementType(), element))
return false;
return true;
case MAP:
if (!defaultValue.isObject())
return false;
for (JsonNode value : defaultValue)
if (!isValidDefault(schema.getValueType(), value))
return false;
return true;
case UNION: // union default: first branch
return isValidDefault(schema.getTypes().get(0), defaultValue);
case RECORD:
if (!defaultValue.isObject())
return false;
for (Field field : schema.getFields())
if (!isValidDefault(field.schema(),
defaultValue.has(field.name())
? defaultValue.get(field.name())
: field.defaultValue()))
return false;
return true;
default:
return false;
}
}
after the fix, replace the jar file in $HIVE_HOME and $CONFLUENT_KAFKA_HOME installation dir.

avro-1.8.1 serialize BigDecimal and Short error fix.的更多相关文章
- Java BigDecimal详解,提供了丰富的四舍五入规则
java.math.BigDecimal类提供用于算术,刻度操作,舍入,比较,哈希算法和格式转换操作. toString()方法提供BigDecimal的规范表示.它使用户可以完全控制舍入行为. 提供 ...
- BigDecimal 小数 浮点数 精度 财务计算
简介 float和double类型的使用局限: 单精度浮点型变量float可以处理6~7位有效数,双精度浮点型变量double可以处理15~16位有效数,在实际应用中,如果需要对更大或者更小的数进行运 ...
- BigDecimal类(高精度小数)
位置:java.math.BigDecimal 作用:提供高精度小数数据类型及相关操作 一.基本介绍 BigDecimal为不可变的.任意精度的有符号十进制数,其值为(unscaledValue * ...
- dubbo/dubbox 增加原生thrift及avro支持
(facebook) thrift / (hadoop) avro / (google) probuf(grpc)是近几年来比较抢眼的高效序列化/rpc框架,dubbo框架虽然有thrift的支持,但 ...
- Avro RPC 之 Protocol 定义和代码生成
摘自http://avro.apache.org/docs/current/spec.html#Protocol+Declaration,1.7.6版 Protocol Declaration Avr ...
- BigDecimal遇到的问题,大伙也说说
一:相除精度丢失的问题 BigDecimal的api除法相对加减乘要实现的复杂多了,只介绍常用的我遇到的问题: 问题:两数相除,如果9/3=3整除没问题,但是10/3=0.33333333...... ...
- BigDecimal常被忽略的问题
一:相除精度丢失的问题 BigDecimal的api除法相对加减乘要实现的复杂多了,只介绍常用的我遇到的问题: 问题:两数相除,如果9/3=3整除没问题,但是10/3=0.33333333...... ...
- Java在ACM中的应用
Java在ACM中的应用 —. 在java中的基本头文件(java中叫包) import java.io.*; import java.util.*; //输入Scanner import java. ...
- [转]unity3d 脚本参考-技术文档
unity3d 脚本参考-技术文档 核心提示:一.脚本概览这是一个关于Unity内部脚本如何工作的简单概览.Unity内部的脚本,是通过附加自定义脚本对象到游戏物体组成的.在脚本对象内部不同志的函数被 ...
随机推荐
- linux网络编程之socket编程(十四)
经过令国鸡冻的APEC会之后,北京的冬天终于不冷了,有暖气的日子就是倍儿爽呀~~洗完热水澡,舒服的躺在床上欢乐地敲打着键盘,是件多么幸福的事呀,好了,抒发情感后,正题继续. 上节中已经初步学习了UDP ...
- 【PAT-并查集-水题】L2-007-家庭房产
L2-007. 家庭房产 给定每个人的家庭成员和其自己名下的房产,请你统计出每个家庭的人口数.人均房产面积及房产套数. 输入格式: 输入第一行给出一个正整数N(<=1000),随后N行,每行按下 ...
- logstash可以考虑在项目中用起来
在用Node.js开发项目的时候,我们常用 log4js 模块来进行日志的记录,可以通过配置 log4js 的 Appenders 将日志输出到Console.File和GELF等不同的地方. log ...
- centos中python2.7被覆盖,yum,python重新安装
参考如下,问题以解决,绝对有效 下载链接yum和python2.7网盘 链接:https://pan.baidu.com/s/1sC2crFW1I8F7zndJU0jjcA 提取码:5kia 直接参考 ...
- 【转载】浅谈HTTPS以及Fiddler抓取HTTPS协议
最近想尝试基于Fiddler的录制功能做一些接口的获取和处理工作,碰到的一个问题就是简单连接Fiddler只能抓取HTTP协议,关键的登录请求等HTTPS协议都没有捕捉到,所以想让Fiddler能够同 ...
- TDOA 之 基站接收数据
基站主要 接收同步节点发来的同步信号,代码里定义为S信息. 以及标签节点发来的定位信号,代码中定义为T信号. 代码中使用中断以及帧过滤功能,对模块只接收自己关心设定好的信息,通过中断告知上层,而不是长 ...
- 微信小程序之小技能篇(一)
1,三目运算改变class值: <view class="{{flag ? 'change' : 'change_after'}}">改变字体颜色</view&g ...
- mongodb中帮助信息和命令
在Mongodb中,可以看作是一种面向对象的操作,如果你对与某一个操作不清楚,可以直接help. 在mongodb中,无非是对DB.user.collections.文档的操作. 下面是简单的示例: ...
- 没有向ZK写offset数据
两天公司要学习kafka,结合之前的storm,做了一个简单的集成,之前也参考了网上的例子一些例子,发现或多或少都有一些问题.所以自己做了一个. 这个是网上其他人遇到的问题,给摘录一下,防止以后自己和 ...
- Dao的扩展
题目: 1.查询所有学生记录,包含年级名称2.查询S1年级下的学生记录 一.项目目录 package com.myschool.entity; import java.util.ArrayList; ...