SMOTE + RF + MLP demo: using cross_val_score to find the best hyperparameters. Demo code for handling imbalanced data: first apply SMOTE oversampling, then use cross-validation to pick the best model parameters. In practice the MLP performs best.
# _*_coding:UTF-8_*_
from sklearn.externals.six import StringIO
from sklearn import tree
import pydot
import sklearn
import numpy as np
import sys
import pickle
import os
from sklearn.model_selection import train_test_split
import sklearn.ensemble
from sklearn.model_selection import cross_val_score
# from sklearn.ensemble import ExtraTreesClassifier
from sklearn import preprocessing
import pdb
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import classification_report
from sklearn.model_selection import StratifiedShuffleSplit
import collections
import imblearn


def iterbrowse(path):
    # recursively yield every file path under the given directory
    for home, dirs, files in os.walk(path):
        for filename in files:
            yield os.path.join(home, filename)


def get_data(filename):
    white_verify = []
    with open(filename) as f:
        lines = f.readlines()
    for line in lines:
        a = line.split("\t")
        if len(a) != 78:
            print(line)
            raise Exception("unexpected field count: %d (expected 78)" % len(a))
        # drop the first 3 metadata columns, keep the numeric features
        white_verify.append([float(n) for n in a[3:]])
    return white_verify


# display test results
def show_cm(cm, labels):
    # compute per-row percentages of the confusion matrix
    percent = (cm * 100.0) / np.array(np.matrix(cm.sum(axis=1)).T)
    print 'Confusion Matrix Stats'
    for i, label_i in enumerate(labels):
        for j, label_j in enumerate(labels):
            print "%s/%s: %.2f%% (%d/%d)" % (label_i, label_j, (percent[i][j]), cm[i][j], cm[i].sum())


def save_model_to_disk(name, model, model_dir='.'):
    serialized_model = pickle.dumps(model, protocol=pickle.HIGHEST_PROTOCOL)
    model_path = os.path.join(model_dir, name + '.model')
    print 'Storing Serialized Model to Disk (%s:%.2fMeg)' % (name, len(serialized_model) / 1024.0 / 1024.0)
    open(model_path, 'wb').write(serialized_model)
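
# a matching loader (hypothetical helper, not part of the original script; the __main__
# block below inlines pickle.loads instead), assuming the same "<name>.model" naming
# convention used by save_model_to_disk
def load_model_from_disk(name, model_dir='.'):
    model_path = os.path.join(model_dir, name + '.model')
    with open(model_path, 'rb') as f:
        return pickle.loads(f.read())
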
wanted_feature = {
    15,  # forward header histogram, median  -----H
    12,  # forward header histogram, min  -----H
    14,  # forward header histogram, mean  -----H
    13,  # forward header histogram, max  -----H
    16,  # forward header histogram, std dev  -----H
    52,  # reverse header histogram, number of distinct length types  -----M
    51,  # reverse header histogram, mean  -----H
    47,  # reverse header histogram, min  -----H
    48,  # reverse header histogram, max  -----H
    49,  # reverse header histogram, mean  -----H
    50,  # reverse header histogram, mean  -----H
    23,  # forward payload histogram, max  -----H
    24,  # forward payload histogram, mean  -----H
    25,  # forward payload histogram, median  -----H
    26,  # forward payload histogram, std dev  -----H
    17,  # forward header histogram, number of distinct length types  -----H
    46,  # reverse packet inter-arrival interval (duration / packet count)  -----H
    28,  # forward payloads < 128 bytes, count  -----H
    29,  # forward payloads >= 128 and < 512 bytes, count  -----H
    30,  # forward payloads >= 512 and < 1024 bytes, count  -----H
    31,  # forward payloads > 1024 bytes, count  -----H
    57,  # reverse payload histogram, min  -----H
    60,  # reverse payload histogram, median  -----H
    59,  # reverse payload histogram, mean  -----H
    61,  # reverse payload histogram, std dev  -----H
    58,  # reverse payload histogram, max  -----H
    42,  # packet count of the current reverse flow
    21,  # forward headers >= 40 bytes, count  -----H
    56,  # reverse headers >= 40 bytes, count  -----H
    65,  # reverse payloads > 1024 bytes, count  -----H
    63,  # reverse payloads < 128 bytes, count  -----H
    64,  # reverse payloads >= 128 and < 512 bytes, count  -----H
    66,  # reverse payloads >= 512 and < 1024 bytes, count  -----H
}

unwanted_features = {6, 7, 8, 41, 42, 43, 67, 68, 69, 70, 71, 72, 73, 74, 75}


def get_wanted_data(x):
"""
return x
"""
ans = []
for item in x:
#row = [data for i, data in enumerate(item) if i+6 in wanted_feature]
row = [data for i, data in enumerate(item) if i+6 not in unwanted_features]
ans.append(row)
#assert len(row) == len(wanted_feature)
assert len(row) == len(x[0])-len(unwanted_features)
return ans if __name__ == '__main__':
    # pdb.set_trace()
    neg_file = "cc_data/black/black_all.txt"
    pos_file = "cc_data/white/white_all.txt"
    X = []
    y = []
    if os.path.isfile(pos_file):
        if pos_file.endswith('.txt'):
            pos_set = np.genfromtxt(pos_file)
        elif pos_file.endswith('.npy'):
            pos_set = np.load(pos_file)
        X.extend(pos_set)
        y += [0] * len(pos_set)
    print("len of white X:", len(X))
    l = len(X)
    if os.path.isfile(neg_file):
        if neg_file.endswith('.txt'):
            neg_set = np.genfromtxt(neg_file)
        elif neg_file.endswith('.npy'):
            neg_set = np.load(neg_file)
        # X.extend(list(neg_set)*5)
        # y += [1] * (5*len(neg_set))
        X.extend(neg_set)
        y += [1] * len(neg_set)
    print("len of black X:", len(X) - l)
    print("len of X:", len(X))
    print("X sample:", X[:3])
    print("len of y:", len(y))
    print("y sample:", y[:3])

    # drop the 3 metadata columns, then remove unwanted features
    X = [x[3:] for x in X]
    X = get_wanted_data(X)
    print("filtered X sample:", X[:1])

    black_verify = []
    for f in iterbrowse("todo/top"):
        print(f)
        black_verify += get_data(f)
    # ValueError: operands could not be broadcast together with shapes (1,74) (75,) (1,74)
    black_verify = get_wanted_data(black_verify)
    print(black_verify)
    black_verify_labels = [1] * len(black_verify)

    white_verify = get_data("todo/white_verify.txt")
    white_verify = get_wanted_data(white_verify)
    print(white_verify)
    white_verify_labels = [0] * len(white_verify)

    unknown_verify = get_data("todo/pek_feature74.txt")
    unknown_verify = get_wanted_data(unknown_verify)
    print(unknown_verify)

    black_verify2 = get_data("todo/x_rat.txt")
    black_verify2 = get_wanted_data(black_verify2)
    print(black_verify2)
    black_verify_labels2 = [1] * len(black_verify2)
"""
# Smote use KNN, so use standard scaler
"""
from sklearn import preprocessing
scaler = preprocessing.StandardScaler().fit(X)
#scaler = preprocessing.MinMaxScaler().fit(X)
X = scaler.transform(X)
print("standard X sample:", X[:3]) black_verify = scaler.transform(black_verify)
print(black_verify) white_verify = scaler.transform(white_verify)
print(white_verify) unknown_verify = scaler.transform(unknown_verify)
print(unknown_verify) black_verify2 = scaler.transform(black_verify2)
print(black_verify2)
# ValueError: operands could not be broadcast together with shapes (756140,75) (42,75) (756140,75)
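
    # Why scale before SMOTE: each synthetic minority sample is drawn on the segment
    # between a minority point x_i and one of its k nearest minority neighbours x_nn:
    #     x_new = x_i + lam * (x_nn - x_i),  lam ~ Uniform(0, 1)
    # The neighbour search uses Euclidean distance, so unscaled features would dominate it.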
    # add weight to the must-detect samples: there are only ~10 of them, so replicate 200x
    for i in range(200):
        X = np.concatenate((X, black_verify))
        y += black_verify_labels

    y = np.array(y)

    labels = ['white', 'CC']
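
    # NOTE: replication is a crude form of importance weighting. Tree models such as
    # RandomForestClassifier accept class_weight, but sklearn's MLPClassifier supports
    # neither class_weight nor sample_weight, so duplicating rows is the workaround here.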
    # if True:  (toggle: train below, or use the else-branch to load a saved model)
    for depth in (128, 64, 32):
        print "***" * 20
        print "hidden_layer_sizes=>", depth
        sss = StratifiedShuffleSplit(n_splits=5, test_size=0.2, random_state=42)
        for train_index, test_index in sss.split(X, y):
            X_train, X_test = X[train_index], X[test_index]
            y_train, y_test = y[train_index], y[test_index]
            # ratio_of_train = 0.8
            # X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=(1 - ratio_of_train))

            print "smote before:"
            print(sorted(collections.Counter(y_train).items()))
            print(sorted(collections.Counter(y_test).items()))
            from imblearn.over_sampling import SMOTE
            # oversample the training split only (fit_sample was renamed fit_resample
            # in later imbalanced-learn releases)
            X_train, y_train = SMOTE().fit_sample(X_train, y_train)
            print "smote after:"
            print(sorted(collections.Counter(y_train).items()))
            # a second, SMOTE-balanced copy of the test split, kept for comparison
            X_test2, y_test2 = SMOTE().fit_sample(X_test, y_test)

            # X_train = preprocessing.normalize(X_train)
            # X_test = preprocessing.normalize(X_test)
"""
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression(C=0.1, penalty='l2', tol=0.01) import xgboost as xgb
clf = xgb.XGBClassifier(learning_rate=0.1,n_estimators=50,max_depth=6, objective= 'binary:logistic',nthread=40,scale_pos_weight=0.02,seed=666)
clf = sklearn.ensemble.RandomForestClassifier(n_estimators=100, n_jobs=10, max_depth=3, random_state=666, oob_score=True)
"""
clf = MLPClassifier(batch_size=128, learning_rate='adaptive', max_iter=1024,
hidden_layer_sizes=(depth,), random_state=666) clf.fit(X_train, y_train)
print "test confusion_matrix:"
# print clf.feature_importances_
y_pred = clf.predict(X_test)
print(sklearn.metrics.confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred)) print "test confusion_matrix (SMOTE):"
y_pred2 = clf.predict(X_test2)
print(sklearn.metrics.confusion_matrix(y_test2, y_pred2))
print(classification_report(y_test2, y_pred2)) print "all confusion_matrix:"
y_pred = clf.predict(X)
print(sklearn.metrics.confusion_matrix(y, y_pred))
print(classification_report(y, y_pred)) print "black verify confusion_matrix:"
black_verify_pred = clf.predict(black_verify)
print(black_verify_pred)
print(classification_report(black_verify_labels, black_verify_pred)) print "black verify2 confusion_matrix:"
black_verify_pred2 = clf.predict(black_verify2)
print(black_verify_pred2)
print(classification_report(black_verify_labels2, black_verify_pred2)) print "white verify confusion_matrix:"
white_verify_pred = clf.predict(white_verify)
print(white_verify_pred)
print(classification_report(white_verify_labels, white_verify_pred)) print("unknown_verify:")
print(clf.predict(unknown_verify))
print "hidden_layer_sizes=>", depth
print "***"*20
    else:
        # for-else: runs after the depth loop completes; originally paired with the
        # commented-out "if True:" above to load a previously saved model instead
        # clf = pickle.loads(open("mpl-acc97-recall98.pkl", 'rb').read())
        clf = pickle.loads(open("mlp-add-topx10.model", 'rb').read())
        y_pred = clf.predict(X)
        print(sklearn.metrics.confusion_matrix(y, y_pred))
        print(classification_report(y, y_pred))
        sys.exit(0)

    """
    dot_data = StringIO()
    tree.export_graphviz(clf, out_file=dot_data)
    graph = pydot.graph_from_dot_data(dot_data.getvalue())
    graph.write_pdf("iris.pdf")
    """

    model_name = "rf_smote"
    save_model_to_disk(model_name, clf)
    # print clf.oob_score_
    scores = cross_val_score(clf, X, y, cv=5)
    print "scores:"
    print scores
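
The script above oversamples by hand inside the split loop and only calls cross_val_score once at the very end, on already-resampled data. A cleaner pattern, if a recent scikit-learn / imbalanced-learn is available (this is a minimal sketch under that assumption, not the script's original code), is to chain scaler, SMOTE, and classifier in an imblearn Pipeline: SMOTE is then re-fit on each training fold only and never leaks into the fold used for scoring, so the fold scores come from untouched, realistic data.

from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import cross_val_score, StratifiedKFold
from imblearn.over_sampling import SMOTE
from imblearn.pipeline import Pipeline  # imblearn's Pipeline applies resampling during fit only


def pick_hidden_size(X, y, candidates=(128, 64, 32)):
    cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
    results = {}
    for size in candidates:
        pipe = Pipeline([
            ('scale', StandardScaler()),         # SMOTE is k-NN based, so scale first
            ('smote', SMOTE(random_state=666)),  # re-fit on each training fold only
            ('mlp', MLPClassifier(hidden_layer_sizes=(size,), batch_size=128,
                                  learning_rate='adaptive', max_iter=1024,
                                  random_state=666)),
        ])
        scores = cross_val_score(pipe, X, y, cv=cv, scoring='f1')
        results[size] = scores.mean()
        print("hidden=%d  mean f1=%.4f" % (size, scores.mean()))
    return max(results, key=results.get)

pick_hidden_size is a hypothetical helper name; scoring='f1' targets the minority (CC) class, which matters more here than raw accuracy.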
Experiment results:

MLP with 128 hidden-layer neurons:
test confusion_matrix (SMOTE):  # confusion matrix on the SMOTE-balanced test data
[[131946 120]
[ 299 131767]]
precision recall f1-score support
0 1.00 1.00 1.00 132066
1 1.00 1.00 1.00 132066
avg / total 1.00 1.00 1.00 264132
all confusion_matrix:  # confusion matrix on the full dataset
[[659846 483]
[ 52 32474]]
precision recall f1-score support
0 1.00 1.00 1.00 660329
1 0.99 1.00 0.99 32526
avg / total 1.00 1.00 1.00 692855
black verify confusion_matrix:  # the must-detect samples: OK, all were detected
[1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
1 1 1 1 1]
precision recall f1-score support
1 1.00 1.00 1.00 42
avg / total 1.00 1.00 1.00 42
black verify2 confusion_matrix:  # production data known to be malicious, hard to distinguish
[0 0 0 0 0 0 0 1 1 1 1]
precision recall f1-score support
0 0.00 0.00 0.00 0
1 1.00 0.36 0.53 11
avg / total 1.00 0.36 0.53 11
white verify confusion_matrix:  # production data known to be benign, hard to distinguish
[1 1 1 1 0 0 0]
precision recall f1-score support
0 1.00 0.43 0.60 7
1 0.00 0.00 0.00 0
avg / total 1.00 0.43 0.60 7
unknown_verify:  # collected from production, quite a few are malicious; detection should be high, but not excessively so
[1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 0 1 1 0 1
0 1 1 1 1 0 0 1 0 1 0 0 0 1 1 0 1 1 0 0 1 1 0 0 0 0 0 0 0 0 1 1 1 1 0 0 1]
The detection rate on production verification looks decent.
Hidden layer size 64:
************************************************************
hidden_layer_sizes=> 64
smote before:
[(0, 528263), (1, 26021)]
[(0, 132066), (1, 6505)]
smote after:
[(0, 528263), (1, 528263)]
test confusion_matrix:
[[131912 154]
[ 24 6481]]
precision recall f1-score support
0 1.00 1.00 1.00 132066
1 0.98 1.00 0.99 6505
avg / total 1.00 1.00 1.00 138571
test confusion_matrix (SMOTE):
[[131912 154]
[ 193 131873]]
precision recall f1-score support
0 1.00 1.00 1.00 132066
1 1.00 1.00 1.00 132066
avg / total 1.00 1.00 1.00 264132
all confusion_matrix:
[[659566 763]
[ 34 32492]]
precision recall f1-score support
0 1.00 1.00 1.00 660329
1 0.98 1.00 0.99 32526
avg / total 1.00 1.00 1.00 692855
black verify confusion_matrix:
[1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
1 1 1 1 1]
precision recall f1-score support
1 1.00 1.00 1.00 42
avg / total 1.00 1.00 1.00 42
black verify2 confusion_matrix:
[0 0 0 0 0 0 0 1 1 1 1]
precision recall f1-score support
0 0.00 0.00 0.00 0
1 1.00 0.36 0.53 11
avg / total 1.00 0.36 0.53 11
white verify confusion_matrix:
[1 1 0 1 0 0 0]
precision recall f1-score support
0 1.00 0.57 0.73 7
1 0.00 0.00 0.00 0
avg / total 1.00 0.57 0.73 7
unknown_verify:
[1 0 1 1 1 0 1 1 1 0 1 0 1 1 1 1 1 1 1 1 1 0 1 1 1 0 0 0 1 0 0 1 0 1 1 0 1
0 0 1 1 1 0 0 1 1 1 1 0 0 1 1 0 1 1 1 0 0 1 0 0 0 0 0 0 0 0 1 0 0 1 0 0 1]
Also looks pretty good!
Now the random forest: depth=15, 100 trees.
test confusion_matrix:
[[132045 21]
[ 16 4818]]
precision recall f1-score support
0 1.00 1.00 1.00 132066
1 1.00 1.00 1.00 4834
avg / total 1.00 1.00 1.00 136900
test confusion_matrix (SMOTE):
[[132045 21]
[ 246 131820]]
precision recall f1-score support
0 1.00 1.00 1.00 132066
1 1.00 1.00 1.00 132066
avg / total 1.00 1.00 1.00 264132
all confusion_matrix:
[[660227 102]
[ 29 24139]]
precision recall f1-score support
0 1.00 1.00 1.00 660329
1 1.00 1.00 1.00 24168
avg / total 1.00 1.00 1.00 684497
black verify confusion_matrix:
[0 1 0 0 1 1 1 1 1 1 1 0 0 1 0 1 1 1 1 0 0 1 0 0 1 1 1 0 0 0 0 1 1 1 1 1 1
1 1 1 1 1]  # these samples must all be detected
precision recall f1-score support
0 0.00 0.00 0.00 0
1 1.00 0.67 0.80 42
avg / total 1.00 0.67 0.80 42
white verify confusion_matrix:
[0 0 0 0 0 0 1]
precision recall f1-score support
0 1.00 0.86 0.92 7
1 0.00 0.00 0.00 0
avg / total 1.00 0.86 0.92 7
unknown_verify:  # detection on production data is far too low; overfitting is severe
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0]
A run with depth=14:
test confusion_matrix:
[[132038 28]
[ 16 4818]]
precision recall f1-score support
0 1.00 1.00 1.00 132066
1 0.99 1.00 1.00 4834
avg / total 1.00 1.00 1.00 136900
test confusion_matrix (SMOTE):
[[132038 28]
[ 257 131809]]
precision recall f1-score support
0 1.00 1.00 1.00 132066
1 1.00 1.00 1.00 132066
avg / total 1.00 1.00 1.00 264132
all confusion_matrix:
[[660220 109]
[ 34 24134]]
precision recall f1-score support
0 1.00 1.00 1.00 660329
1 1.00 1.00 1.00 24168
avg / total 1.00 1.00 1.00 684497
black verify confusion_matrix:
[1 1 0 0 1 1 1 1 1 1 0 0 0 0 0 1 1 1 1 0 1 0 0 0 1 1 1 0 0 0 0 1 1 1 1 1 1
1 1 1 1 1]
precision recall f1-score support
0 0.00 0.00 0.00 0
1 1.00 0.64 0.78 42
avg / total 1.00 0.64 0.78 42
white verify confusion_matrix:
[0 0 0 0 0 1 1]
precision recall f1-score support
0 1.00 0.71 0.83 7
1 0.00 0.00 0.00 0
avg / total 1.00 0.71 0.83 7
unknown_verify:
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1]
Slightly better.
With depth=13:
test confusion_matrix (SMOTE):
[[132037 29]
[ 301 131765]]
precision recall f1-score support
0 1.00 1.00 1.00 132066
1 1.00 1.00 1.00 132066
avg / total 1.00 1.00 1.00 264132
all confusion_matrix:
[[660217 112]
[ 36 24132]]
precision recall f1-score support
0 1.00 1.00 1.00 660329
1 1.00 1.00 1.00 24168
avg / total 1.00 1.00 1.00 684497
black verify confusion_matrix:
[0 1 0 1 1 1 1 1 1 1 0 0 0 0 0 0 1 1 1 0 0 0 0 0 1 1 1 0 0 0 0 1 0 1 1 1 1
0 1 1 1 1]
precision recall f1-score support
0 0.00 0.00 0.00 0
1 1.00 0.55 0.71 42
avg / total 1.00 0.55 0.71 42
white verify confusion_matrix:
[0 0 0 0 0 1 1]
precision recall f1-score support
0 1.00 0.71 0.83 7
1 0.00 0.00 0.00 0
avg / total 1.00 0.71 0.83 7
unknown_verify:
[0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 1 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
About the same; tuning depth further makes little difference.
Overall the random forest does not perform as well as the MLP!
Now logistic regression:
test confusion_matrix (SMOTE):
[[114699 17367]
[ 11921 120145]]
precision recall f1-score support
0 0.91 0.87 0.89 132066
1 0.87 0.91 0.89 132066
avg / total 0.89 0.89 0.89 264132
all confusion_matrix:
[[573083 87246]
[ 2877 29649]]
precision recall f1-score support
0 1.00 0.87 0.93 660329
1 0.25 0.91 0.40 32526
avg / total 0.96 0.87 0.90 692855
black verify confusion_matrix:
[1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 0 0
1 1 0 1 1]
precision recall f1-score support
0 0.00 0.00 0.00 0
1 1.00 0.88 0.94 42
avg / total 1.00 0.88 0.94 42
black verify2 confusion_matrix:
[1 1 0 0 0 0 0 1 1 1 1]
precision recall f1-score support
0 0.00 0.00 0.00 0
1 1.00 0.55 0.71 11
avg / total 1.00 0.55 0.71 11
white verify confusion_matrix:
[1 1 1 1 1 1 1]
precision recall f1-score support
0 0.00 0.00 0.00 7
1 0.00 0.00 0.00 0
avg / total 0.00 0.00 0.00 7
unknown_verify:
[1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1]
Overall precision on the positive class is insufficient, only 0.25 (29649 true positives against 87246 false positives).
Now XGBoost:
test confusion_matrix:
[[132018 48]
[ 11 6494]]
precision recall f1-score support
0 1.00 1.00 1.00 132066
1 0.99 1.00 1.00 6505
avg / total 1.00 1.00 1.00 138571
test confusion_matrix (SMOTE):
[[132018 48]
[ 82 131984]]
precision recall f1-score support
0 1.00 1.00 1.00 132066
1 1.00 1.00 1.00 132066
avg / total 1.00 1.00 1.00 264132
all confusion_matrix:
[[660134 195]
[ 29 32497]]
precision recall f1-score support
0 1.00 1.00 1.00 660329
1 0.99 1.00 1.00 32526
avg / total 1.00 1.00 1.00 692855
black verify confusion_matrix:
[1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
1 1 1 1 1]
precision recall f1-score support
1 1.00 1.00 1.00 42
avg / total 1.00 1.00 1.00 42
black verify2 confusion_matrix:
[0 0 0 0 0 0 0 1 0 1 1]
precision recall f1-score support
0 0.00 0.00 0.00 0
1 1.00 0.27 0.43 11
avg / total 1.00 0.27 0.43 11
white verify confusion_matrix:
[0 0 1 0 1 0 1]
precision recall f1-score support
0 1.00 0.57 0.73 7
1 0.00 0.00 0.00 0
avg / total 1.00 0.57 0.73 7
unknown_verify:
[0 0 0 0 0 0 0 1 0 0 1 0 0 1 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 1 1 0 0
0 1 1 1 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 1 0 1 0 0 0 1 0 0 0 0 0 0 0 0 0]
Overall, XGBoost looks better than the random forest!