Fixing avro-1.8.1 serialization errors for BigDecimal and Short.
1. Create a MySQL table such as:
CREATE TABLE `test` (
`a` tinyint(4) NOT NULL DEFAULT '',
`b` decimal(12,0) DEFAULT NULL,
`c` decimal(5,0) DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci;
2. Start Kafka Connect with the Debezium MySQL connector plugin, using a source connector configuration like:
{
  "name": "inventory-connector",
  "config": {
    "connector.class": "io.debezium.connector.mysql.MySqlConnector",
    "tasks.max": "1",
    "database.hostname": "localhost",
    "database.port": "3306",
    "database.user": "root",
    "database.password": "root",
    "database.server.id": "223344",
    "database.server.name": "localhost",
    "database.whitelist": "inventory",
    "table.whitelist": "inventory.test",
    "database.history.kafka.bootstrap.servers": "localhost:9092",
    "database.history.kafka.topic": "schema-changes.inventory",
    "include.schema.changes": "false",
    "transforms": "extractField",
    "transforms.extractField.type": "com.centchain.kafka.connect.mysql.DebeziumMysql$Value",
    "transforms.extractField.field": "after"
  }
}
3. The source task fails with errors like:
[-- ::,] INFO WorkerSourceTask{id=cashier--} flushing outstanding messages for offset commit (org.apache.kafka.connect.runtime.WorkerSourceTask:)
[-- ::,] ERROR WorkerSourceTask{id=cashier--} Task threw an uncaught and unrecoverable exception (org.apache.kafka.connect.runtime.WorkerTask:)
org.apache.kafka.connect.errors.ConnectException: Tolerance exceeded in error handler
at org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execAndHandleError(RetryWithToleranceOperator.java:)
at org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execute(RetryWithToleranceOperator.java:)
at org.apache.kafka.connect.runtime.WorkerSourceTask.convertTransformedRecord(WorkerSourceTask.java:)
at org.apache.kafka.connect.runtime.WorkerSourceTask.sendRecords(WorkerSourceTask.java:)
at org.apache.kafka.connect.runtime.WorkerSourceTask.execute(WorkerSourceTask.java:)
at org.apache.kafka.connect.runtime.WorkerTask.doRun(WorkerTask.java:)
at org.apache.kafka.connect.runtime.WorkerTask.run(WorkerTask.java:)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:)
at java.util.concurrent.FutureTask.run(FutureTask.java:)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:)
at java.lang.Thread.run(Thread.java:)
Caused by: org.apache.avro.AvroRuntimeException: Unknown datum class: class java.lang.Short
at org.apache.avro.util.internal.JacksonUtils.toJson(JacksonUtils.java:)
at org.apache.avro.util.internal.JacksonUtils.toJsonNode(JacksonUtils.java:)
at org.apache.avro.Schema$Field.<init>(Schema.java:)
at org.apache.avro.Schema$Field.<init>(Schema.java:)
at io.confluent.connect.avro.AvroData.addAvroRecordField(AvroData.java:)
at io.confluent.connect.avro.AvroData.fromConnectSchema(AvroData.java:)
at io.confluent.connect.avro.AvroData.fromConnectSchema(AvroData.java:)
...
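The root cause is that stock avro 1.8.1's JacksonUtils.toJson has no branch for java.lang.Short or java.math.BigDecimal, and the Avro converter reaches it when converting field defaults (Debezium maps the tinyint column to Connect INT16, i.e. Short, and the decimal columns to BigDecimal). A minimal reproduction against the unpatched avro-1.8.1 jar, with no Kafka involved (the class name is illustrative):

import java.math.BigDecimal;
import org.apache.avro.util.internal.JacksonUtils;

public class UnknownDatumRepro {
  public static void main(String[] args) {
    // Throws org.apache.avro.AvroRuntimeException: Unknown datum class: class java.lang.Short
    JacksonUtils.toJsonNode((short) 1);
    // A BigDecimal fails the same way: Unknown datum class: class java.math.BigDecimal
    JacksonUtils.toJsonNode(new BigDecimal("1"));
  }
}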
4. Fix
File: avro-release-1.8.1/lang/java/avro/src/main/java/org/apache/avro/util/internal/JacksonUtils.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.util.internal;

import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import org.apache.avro.AvroRuntimeException;
import org.apache.avro.JsonProperties;
import org.apache.avro.Schema;
import org.codehaus.jackson.JsonGenerator;
import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.util.TokenBuffer;

public class JacksonUtils {
  static final String BYTES_CHARSET = "ISO-8859-1";

  private JacksonUtils() {
  }

  public static JsonNode toJsonNode(Object datum) {
if (datum == null) {
return null;
}
try {
TokenBuffer generator = new TokenBuffer(new ObjectMapper());
toJson(datum, generator);
return new ObjectMapper().readTree(generator.asParser());
} catch (IOException e) {
throw new AvroRuntimeException(e);
}
  }

  @SuppressWarnings(value = "unchecked")
static void toJson(Object datum, JsonGenerator generator) throws IOException {
if (datum == JsonProperties.NULL_VALUE) { // null
generator.writeNull();
} else if (datum instanceof Map) { // record, map
generator.writeStartObject();
for (Map.Entry<Object,Object> entry : ((Map<Object,Object>) datum).entrySet()) {
generator.writeFieldName(entry.getKey().toString());
toJson(entry.getValue(), generator);
}
generator.writeEndObject();
} else if (datum instanceof Collection) { // array
generator.writeStartArray();
for (Object element : (Collection<?>) datum) {
toJson(element, generator);
}
generator.writeEndArray();
} else if (datum instanceof byte[]) { // bytes, fixed
generator.writeString(new String((byte[]) datum, BYTES_CHARSET));
} else if (datum instanceof CharSequence || datum instanceof Enum<?>) { // string, enum
generator.writeString(datum.toString());
} else if (datum instanceof Double) { // double
generator.writeNumber((Double) datum);
} else if (datum instanceof Float) { // float
generator.writeNumber((Float) datum);
} else if (datum instanceof Long) { // long
generator.writeNumber((Long) datum);
} else if (datum instanceof Integer) { // int
generator.writeNumber((Integer) datum);
    } else if (datum instanceof Short) { // short (added): widen to int before writing
      generator.writeNumber(((Short) datum).intValue());
    } else if (datum instanceof Boolean) { // boolean
      generator.writeBoolean((Boolean) datum);
    } else if (datum instanceof BigDecimal) { // decimal (added)
      generator.writeNumber((BigDecimal) datum);
    } else {
throw new AvroRuntimeException("Unknown datum class: " + datum.getClass());
}
  }

  public static Object toObject(JsonNode jsonNode) {
    return toObject(jsonNode, null);
  }

  public static Object toObject(JsonNode jsonNode, Schema schema) {
if (schema != null && schema.getType().equals(Schema.Type.UNION)) {
return toObject(jsonNode, schema.getTypes().get(0));
}
if (jsonNode == null) {
return null;
} else if (jsonNode.isNull()) {
return JsonProperties.NULL_VALUE;
} else if (jsonNode.isBoolean()) {
return jsonNode.asBoolean();
} else if (jsonNode.isInt()) {
if (schema == null || schema.getType().equals(Schema.Type.INT)) {
return jsonNode.asInt();
} else if (schema.getType().equals(Schema.Type.LONG)) {
return jsonNode.asLong();
}
    } else if (jsonNode.isBigDecimal()) { // added
      return jsonNode.asDouble();
    } else if (jsonNode.isLong()) {
return jsonNode.asLong();
} else if (jsonNode.isDouble()) {
if (schema == null || schema.getType().equals(Schema.Type.DOUBLE)) {
return jsonNode.asDouble();
} else if (schema.getType().equals(Schema.Type.FLOAT)) {
return (float) jsonNode.asDouble();
}
} else if (jsonNode.isTextual()) {
if (schema == null || schema.getType().equals(Schema.Type.STRING) ||
schema.getType().equals(Schema.Type.ENUM)) {
return jsonNode.asText();
} else if (schema.getType().equals(Schema.Type.BYTES)) {
try {
return jsonNode.getTextValue().getBytes(BYTES_CHARSET);
} catch (UnsupportedEncodingException e) {
throw new AvroRuntimeException(e);
}
}
} else if (jsonNode.isArray()) {
List l = new ArrayList();
for (JsonNode node : jsonNode) {
l.add(toObject(node, schema == null ? null : schema.getElementType()));
}
return l;
} else if (jsonNode.isObject()) {
Map m = new LinkedHashMap();
for (Iterator<String> it = jsonNode.getFieldNames(); it.hasNext(); ) {
String key = it.next();
Schema s = null;
if (schema == null) {
s = null;
} else if (schema.getType().equals(Schema.Type.MAP)) {
s = schema.getValueType();
} else if (schema.getType().equals(Schema.Type.RECORD)) {
s = schema.getField(key).schema();
}
Object value = toObject(jsonNode.get(key), s);
m.put(key, value);
}
return m;
}
return null;
}
}
The key changes are in toJson(), which gains branches for java.lang.Short (widened to int) and java.math.BigDecimal (written with writeNumber), and in toObject(), which gains an isBigDecimal() branch. Without them, both types fall through to the "Unknown datum class" exception shown above.
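A quick sanity check of the patched class, as a minimal sketch assuming the rebuilt avro-1.8.1 jar is on the classpath (the class name is illustrative):

import java.math.BigDecimal;
import org.apache.avro.util.internal.JacksonUtils;

public class PatchedJacksonUtilsCheck {
  public static void main(String[] args) {
    // Both calls threw "Unknown datum class" before the patch; now they come back
    // as plain JSON numbers.
    System.out.println(JacksonUtils.toJsonNode((short) 1));             // 1
    System.out.println(JacksonUtils.toJsonNode(new BigDecimal("1.5"))); // 1.5
  }
}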
5. Result
5.1 MySQL -> Kafka. Consuming the topic with kafka-avro-console-consumer:
lenmom@M1701:~/workspace/software/confluent-community-5.1.-2.11$ bin/kafka-avro-console-consumer --bootstrap-server 127.0.0.1: --from-beginning --topic localhost.a.test
{"a":,"b":{"bytes":"\u0001"},"c":{"bytes":"\u0001"},"operation_type":"c","pt_log_d":"","last_update_timestamp":}
{"a":,"b":{"bytes":"\u0001"},"c":{"bytes":"\u0002"},"operation_type":"c","pt_log_d":"","last_update_timestamp":}
{"a":,"b":{"bytes":"\u0001"},"c":{"bytes":"\u0003"},"operation_type":"c","pt_log_d":"","last_update_timestamp":}
{"a":,"b":{"bytes":"\u0001"},"c":{"bytes":"\u0004"},"operation_type":"c","pt_log_d":"","last_update_timestamp":}
5.2 Kafka -> Hive
Configuration for the HDFS sink connector:
{
  "name": "hive-sink",
  "config": {
    "connector.class": "io.confluent.connect.hdfs.HdfsSinkConnector",
    "tasks.max": "",
    "topics": "localhost.a.test",
    "hdfs.url": "hdfs://127.0.0.1:9000/",
    "logs.dir": "/logs",
    "topics.dir": "/inventory/",
    "hadoop.conf.dir": "/home/lenmom/workspace/software/hadoop-2.7.3/etc/hadoop/",
    "flush.size": "",
    "rotate.interval.ms": "",
    "hive.integration": true,
    "hive.database": "inventory",
    "partitioner.class": "io.confluent.connect.hdfs.partitioner.FieldPartitioner",
    "partition.field.name": "pt_log_d",
    "hive.metastore.uris": "thrift://127.0.0.1:9083",
    "schema.compatibility": "BACKWARD"
  }
}
result:
hive> select * from localhost_a_test;
OK
c -- ::17.029
c -- ::17.029
c -- ::17.029
c -- ::17.029
Time taken: 0.168 seconds, Fetched: row(s)
6. For a table with schema
CREATE TABLE `decimalTest` (
`POINTSDAY` decimal(12,0) NOT NULL DEFAULT '' ,
`POINTSMONTH` decimal(12,0) NOT NULL DEFAULT '' ,
`CASHDAY` decimal(12,0) NOT NULL DEFAULT '' ,
`CASHMONTH` decimal(12,0) NOT NULL DEFAULT ''
);

insert into decimalTest values (1, 2, 3);
if we use the HDFS connector to sink this table to Hive, we get an error like:
[-- ::,] ERROR WorkerSinkTask{id=hive-sink-} Task threw an uncaught and unrecoverable exception (org.apache.kafka.connect.runtime.WorkerTask:)
org.apache.kafka.connect.errors.ConnectException: Tolerance exceeded in error handler
at org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execAndHandleError(RetryWithToleranceOperator.java:)
at org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execute(RetryWithToleranceOperator.java:)
at org.apache.kafka.connect.runtime.WorkerSinkTask.convertAndTransformRecord(WorkerSinkTask.java:)
at org.apache.kafka.connect.runtime.WorkerSinkTask.convertMessages(WorkerSinkTask.java:)
at org.apache.kafka.connect.runtime.WorkerSinkTask.poll(WorkerSinkTask.java:)
at org.apache.kafka.connect.runtime.WorkerSinkTask.iteration(WorkerSinkTask.java:)
at org.apache.kafka.connect.runtime.WorkerSinkTask.execute(WorkerSinkTask.java:)
at org.apache.kafka.connect.runtime.WorkerTask.doRun(WorkerTask.java:)
at org.apache.kafka.connect.runtime.WorkerTask.run(WorkerTask.java:)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:)
at java.util.concurrent.FutureTask.run(FutureTask.java:)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:)
at java.lang.Thread.run(Thread.java:)
Caused by: org.apache.kafka.connect.errors.SchemaBuilderException: Invalid default value
at org.apache.kafka.connect.data.SchemaBuilder.defaultValue(SchemaBuilder.java:)
at io.confluent.connect.avro.AvroData.toConnectSchema(AvroData.java:)
at io.confluent.connect.avro.AvroData.toConnectSchema(AvroData.java:)
at io.confluent.connect.avro.AvroData.toConnectSchema(AvroData.java:)
at io.confluent.connect.avro.AvroData.toConnectData(AvroData.java:)
at io.confluent.connect.avro.AvroConverter.toConnectData(AvroConverter.java:)
at org.apache.kafka.connect.runtime.WorkerSinkTask.lambda$convertAndTransformRecord$(WorkerSinkTask.java:)
at org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execAndRetry(RetryWithToleranceOperator.java:)
at org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execAndHandleError(RetryWithToleranceOperator.java:)
... more
Caused by: org.apache.kafka.connect.errors.DataException: Invalid value: null used for required field: "null", schema type: BYTES
at org.apache.kafka.connect.data.ConnectSchema.validateValue(ConnectSchema.java:)
at org.apache.kafka.connect.data.ConnectSchema.validateValue(ConnectSchema.java:)
at org.apache.kafka.connect.data.SchemaBuilder.defaultValue(SchemaBuilder.java:)
... more
[-- ::,] ERROR WorkerSinkTask{id=hive-sink-} Task is being killed and will not recover until manually restarted (org.apache.kafka.connect.runtime.WorkerTask:)
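The nested DataException can be reproduced in isolation with the Connect API: the converter is effectively trying to set a null default on a required BYTES (decimal) schema (a minimal sketch; the class name is illustrative):

import org.apache.kafka.connect.data.SchemaBuilder;

public class NullDecimalDefaultRepro {
  public static void main(String[] args) {
    // Throws SchemaBuilderException: Invalid default value, caused by
    // DataException: Invalid value: null used for required field: "null", schema type: BYTES
    SchemaBuilder.bytes().defaultValue(null).build();
  }
}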
This is caused by the Kafka Avro converter failing to convert the decimal columns' default values when it rebuilds the Connect schema. To fix it, I added logic in the following file:
6.1 /schema-registry-5.1.0/avro-converter/src/main/java/io/confluent/connect/avro/AvroData.java
private Object defaultValueFromAvro(Schema schema,
org.apache.avro.Schema avroSchema,
Object value,
ToConnectContext toConnectContext) {
// The type will be JsonNode if this default was pulled from a Connect default field, or an
// Object if it's the actual Avro-specified default. If it's a regular Java object, we can
// use our existing conversion tools.
if (!(value instanceof JsonNode)) {
return toConnectData(schema, value, toConnectContext);
    }

    JsonNode jsonValue = (JsonNode) value;
switch (avroSchema.getType()) {
case INT:
if (schema.type() == Schema.Type.INT8) {
return (byte) jsonValue.getIntValue();
} else if (schema.type() == Schema.Type.INT16) {
return (short) jsonValue.getIntValue();
} else if (schema.type() == Schema.Type.INT32) {
return jsonValue.getIntValue();
} else {
break;
        }
      case LONG:
        return jsonValue.getLongValue();
      case FLOAT:
        return (float) jsonValue.getDoubleValue();
      case DOUBLE:
        return jsonValue.getDoubleValue();
      case BOOLEAN:
        return jsonValue.asBoolean();
      case NULL:
        return null;
      case STRING:
      case ENUM:
        return jsonValue.asText();
      case BYTES: // changed: read the decimal default directly instead of binary/int bytes
        return jsonValue.getDecimalValue();
      case FIXED:
        try {
          return jsonValue.getBinaryValue();
        } catch (IOException e) {
          throw new DataException("Invalid binary data in default value");
        }
        // return convertIntegerToBytes(jsonValue.getIntValue());
        // return jsonValue.getIntValue();
      case ARRAY: {
if (!jsonValue.isArray()) {
throw new DataException("Invalid JSON for array default value: " + jsonValue.toString());
}
List<Object> result = new ArrayList<>(jsonValue.size());
for (JsonNode elem : jsonValue) {
result.add(
defaultValueFromAvro(schema, avroSchema.getElementType(), elem, toConnectContext));
}
return result;
      }
      case MAP: {
if (!jsonValue.isObject()) {
throw new DataException("Invalid JSON for map default value: " + jsonValue.toString());
}
Map<String, Object> result = new HashMap<>(jsonValue.size());
Iterator<Map.Entry<String, JsonNode>> fieldIt = jsonValue.getFields();
while (fieldIt.hasNext()) {
Map.Entry<String, JsonNode> field = fieldIt.next();
Object converted = defaultValueFromAvro(
schema, avroSchema.getElementType(), field.getValue(), toConnectContext);
result.put(field.getKey(), converted);
}
return result;
      }
      case RECORD: {
        if (!jsonValue.isObject()) {
          throw new DataException("Invalid JSON for record default value: " + jsonValue.toString());
        }

        Struct result = new Struct(schema);
for (org.apache.avro.Schema.Field avroField : avroSchema.getFields()) {
Field field = schema.field(avroField.name());
JsonNode fieldJson = ((JsonNode) value).get(field.name());
Object converted = defaultValueFromAvro(
field.schema(), avroField.schema(), fieldJson, toConnectContext);
result.put(avroField.name(), converted);
}
return result;
      }
      case UNION: {
        // Defaults must match first type
        org.apache.avro.Schema memberAvroSchema = avroSchema.getTypes().get(0);
if (memberAvroSchema.getType() == org.apache.avro.Schema.Type.NULL) {
return null;
} else {
return defaultValueFromAvro(
schema.field(unionMemberFieldName(memberAvroSchema)).schema(),
memberAvroSchema,
value,
toConnectContext);
}
}
default: {
return null;
}
}
return null;
}
After the fix, rebuild the jar and replace kafka-connect-avro-converter-5.1.0.jar in the Confluent Kafka installation directory. The data can then be sunk to Hive:
hive> select * from decimalTest limit ;
[WARNING] Avro: Invalid default for field POINTSDAY: not a {"type":"bytes","scale":,"precision":,"connect.version":,"connect.parameters":{"scale":"","connect.decimal.precision":""},"connect.default":"AA==","connect.name":"org.apache.kafka.connect.data.Decimal","logicalType":"decimal"}
[WARNING] Avro: Invalid default for field POINTSMONTH: not a {"type":"bytes","scale":,"precision":,"connect.version":,"connect.parameters":{"scale":"","connect.decimal.precision":""},"connect.default":"AA==","connect.name":"org.apache.kafka.connect.data.Decimal","logicalType":"decimal"}
[WARNING] Avro: Invalid default for field CASHDAY: not a {"type":"bytes","scale":,"precision":,"connect.version":,"connect.parameters":{"scale":"","connect.decimal.precision":""},"connect.default":"AA==","connect.name":"org.apache.kafka.connect.data.Decimal","logicalType":"decimal"}
[WARNING] Avro: Invalid default for field CASHMONTH: not a {"type":"bytes","scale":,"precision":,"connect.version":,"connect.parameters":{"scale":"","connect.decimal.precision":""},"connect.default":"AA==","connect.name":"org.apache.kafka.connect.data.Decimal","logicalType":"decimal"}
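The "AA==" default shown in these warnings is the Connect Decimal logical type's encoding of 0: the unscaled value is a single 0x00 byte, base64-encoded. For reference, a minimal sketch of that schema shape on the Connect side, using the Kafka Connect API (not taken from the Debezium source; the class name is illustrative):

import java.math.BigDecimal;
import org.apache.kafka.connect.data.Decimal;
import org.apache.kafka.connect.data.Schema;

public class DecimalDefaultSketch {
  public static void main(String[] args) {
    // A decimal(12,0) NOT NULL column with default 0 (as the "AA==" above implies) becomes
    // a required BYTES schema carrying the Decimal logical type and a BigDecimal default.
    Schema pointsDay = Decimal.builder(0).defaultValue(BigDecimal.ZERO).build();
    System.out.println(pointsDay.name());         // org.apache.kafka.connect.data.Decimal
    System.out.println(pointsDay.defaultValue()); // 0
  }
}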
6.2 As we can see above, querying the data in Hive still produces warnings. To eliminate them, modify isValidDefault() in:
/avro-release-1.8.1/lang/java/avro/src/main/java/org/apache/avro/Schema.java
private static boolean isValidDefault(Schema schema, JsonNode defaultValue) {
if (defaultValue == null)
return false;
    switch (schema.getType()) {
      case BYTES: // changed: accept a numeric default for decimal logical types
        if (schema.logicalType != null
            && (schema.logicalType.getName().equals("decimal")
                || schema.logicalType.getName().equalsIgnoreCase("bigdecimal"))) {
          return defaultValue.isBigDecimal();
        } else {
          return defaultValue.isTextual();
        }
      case STRING:
      case ENUM:
      case FIXED:
        return defaultValue.isTextual();
case INT:
case LONG:
case FLOAT:
case DOUBLE:
return defaultValue.isNumber();
case BOOLEAN:
return defaultValue.isBoolean();
case NULL:
return defaultValue.isNull();
case ARRAY:
if (!defaultValue.isArray())
return false;
for (JsonNode element : defaultValue)
if (!isValidDefault(schema.getElementType(), element))
return false;
return true;
case MAP:
if (!defaultValue.isObject())
return false;
for (JsonNode value : defaultValue)
if (!isValidDefault(schema.getValueType(), value))
return false;
return true;
case UNION: // union default: first branch
return isValidDefault(schema.getTypes().get(0), defaultValue);
case RECORD:
if (!defaultValue.isObject())
return false;
for (Field field : schema.getFields())
if (!isValidDefault(field.schema(),
defaultValue.has(field.name())
? defaultValue.get(field.name())
: field.defaultValue()))
return false;
return true;
default:
return false;
}
}
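For reference, the logicalType consulted above is attached to a bytes schema like this (a minimal sketch against the public Avro 1.8.1 API; not part of the patch itself):

import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;

public class DecimalLogicalTypeSketch {
  public static void main(String[] args) {
    // Build the same shape of schema the connector registers for a decimal(12,0) column.
    Schema bytes = Schema.create(Schema.Type.BYTES);
    LogicalTypes.decimal(12, 0).addToSchema(bytes);
    System.out.println(bytes.getLogicalType().getName()); // decimal
  }
}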
After the fix, rebuild the Avro jar and replace it in the $HIVE_HOME and $CONFLUENT_KAFKA_HOME installation directories.