[Keras Case Study] Using the scikit-learn wrapper (mnist_sklearn_wrapper)
import numpy as np
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
# KerasClassifier wraps a Keras model so that it behaves like a scikit-learn classifier
from keras.wrappers.scikit_learn import KerasClassifier
# GridSearchCV exhaustively searches the given parameter values for the best model
# (note: in scikit-learn >= 0.18 this class lives in sklearn.model_selection instead)
from sklearn.grid_search import GridSearchCV
Using TensorFlow backend.
# number of classes
nb_classes = 10
# input image dimensions
img_rows, img_cols = 28, 28
# load the data
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# the raw data has no channel axis, so its shape is (60000, 28, 28);
# add a channel axis to match the TensorFlow backend's data format
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
# the new shape is (60000, 28, 28, 1); the trailing 1 means one channel, i.e. grayscale images
# record the input shape so it can be passed to the first layer of the network
input_shape = (img_rows, img_cols, 1)
# convert the data to float32 (single-precision floating point)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
# normalize to [0, 1] (common practice for image data)
X_train /= 255
X_test /= 255
# convert the class labels to one-hot encoding
y_train = np_utils.to_categorical(y_train, nb_classes)
y_test = np_utils.to_categorical(y_test, nb_classes)
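The reshape above hard-codes the TensorFlow channels-last layout. A minimal backend-agnostic sketch (assuming Keras 1.x, where the backend module exposes image_dim_ordering(); Keras 2 renamed it image_data_format()) would branch on the backend's preferred ordering instead:
from keras import backend as K

if K.image_dim_ordering() == 'th':
    # Theano-style channels-first layout: (samples, channels, rows, cols)
    X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
    X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    # TensorFlow-style channels-last layout: (samples, rows, cols, channels)
    X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
    X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)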
# factory function that builds, compiles and returns the CNN model
def make_model(dense_layer_sizes, nb_filters, nb_conv, nb_pool):
    model = Sequential()
    # Keras 1.x API: Convolution2D(nb_filter, nb_row, nb_col, border_mode=...);
    # in Keras 2 this became Conv2D(filters, (rows, cols), padding=...)
    model.add(Convolution2D(nb_filters, nb_conv, nb_conv,
                            border_mode='valid',
                            input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    # stack one fully connected layer per requested size
    for layer_size in dense_layer_sizes:
        model.add(Dense(layer_size))
        model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])
    return model
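As a quick sanity check (hypothetical usage, not part of the original run), the factory can be called directly with one candidate configuration before handing it to the grid search:
sanity_model = make_model(dense_layer_sizes=[64, 64],
                          nb_filters=8, nb_conv=3, nb_pool=2)
sanity_model.summary()  # prints layer output shapes and parameter counts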
# candidate size lists for the fully connected layers
dense_size_candidates = [[32], [64], [32, 32], [64, 64]]
# Create a classifier/estimator object implementing the scikit-learn classifier
# interface for Keras. KerasClassifier takes:
# build_fn: callable function or class instance
# **sk_params: model parameters & fitting parameters
# In detail:
# build_fn is a callable that builds, compiles and returns a Keras model,
# which is then used for training/prediction; here we pass the make_model
# function defined above.
# **sk_params are keyword arguments (collected into a dict inside the wrapper)
# and may be either model parameters or fitting parameters. The legal model
# parameters are exactly the parameters of build_fn. Note that, like every
# other scikit-learn estimator, build_fn should provide default values for its
# parameters, so that an estimator can be created without passing anything to
# sk_params.
# sk_params also accepts parameters for the fit/predict/predict_proba/score
# methods, e.g. 'nb_epoch' and 'batch_size'. Such parameters are resolved in
# this order: values passed directly to fit/predict/predict_proba/score take
# precedence, then values passed to sk_params, and finally the defaults of the
# Keras Sequential model's own methods.
# Here we pass batch_size=32, which will be used when fit is called.
my_classifier = KerasClassifier(make_model, batch_size=32)
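A minimal sketch of that precedence rule (the values here are illustrative, not part of the original run): keyword arguments passed directly to fit override the batch_size=32 stored in sk_params for that call only:
# kwargs given directly to fit take precedence over sk_params:
# this call trains for 1 epoch with batch_size=128, not the stored 32
my_classifier.fit(X_train, y_train, nb_epoch=1, batch_size=128)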
# When the scikit-learn grid-search interface is used, the legal tunable
# parameters are exactly those accepted by sk_params, including the fitting
# parameters. In other words, grid search can select the best batch_size or
# nb_epoch as well as the best model parameters.
# GridSearchCV performs an exhaustive search over the given parameter values;
# its two key methods are fit and predict. It takes:
# - the estimator object my_classifier; one estimator is instantiated per grid point
# - param_grid: a dict mapping parameter names to the lists of values to try
# - scoring: the evaluation metric, here log loss
#   (in scikit-learn >= 0.18 the scorer is named 'neg_log_loss')
validator = GridSearchCV(my_classifier,
                         param_grid={'dense_layer_sizes': dense_size_candidates,
                                     'nb_epoch': [3, 6],
                                     'nb_filters': [8],
                                     'nb_conv': [3],
                                     'nb_pool': [2]},
                         scoring='log_loss')
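To get a feel for the cost before fitting (a sketch; ParameterGrid is assumed importable from the same module as GridSearchCV): this grid has 4 x 2 x 1 x 1 x 1 = 8 parameter combinations, and GridSearchCV uses 3-fold cross-validation by default, so the search runs 8 x 3 = 24 fits on 40000 samples plus one final refit on the full 60000, which is exactly the pattern in the log below.
from sklearn.grid_search import ParameterGrid  # sklearn.model_selection in >= 0.18

n_points = len(ParameterGrid(validator.param_grid))  # 8 grid points
print('%d grid points x 3 folds = %d fits, plus 1 refit' % (n_points, 3 * n_points))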
# train the model on (X_train, y_train) for every combination of parameter values
validator.fit(X_train, y_train)
# print the best parameter combination found during the search
print('The parameters of the best model are: ')
print(validator.best_params_)
Epoch 1/3
40000/40000 [==============================] - 14s - loss: 0.8058 - acc: 0.7335
Epoch 2/3
40000/40000 [==============================] - 10s - loss: 0.4620 - acc: 0.8545
Epoch 3/3
40000/40000 [==============================] - 10s - loss: 0.3958 - acc: 0.8747
19776/20000 [============================>.] - ETA: 0s
Epoch 1/3
40000/40000 [==============================] - 11s - loss: 0.9589 - acc: 0.6804
Epoch 2/3
40000/40000 [==============================] - 10s - loss: 0.5885 - acc: 0.8116
Epoch 3/3
40000/40000 [==============================] - 10s - loss: 0.5021 - acc: 0.8429
19488/20000 [============================>.] - ETA: 0s
Epoch 1/3
40000/40000 [==============================] - 11s - loss: 0.9141 - acc: 0.6958
Epoch 2/3
40000/40000 [==============================] - 10s - loss: 0.5716 - acc: 0.8136
Epoch 3/3
40000/40000 [==============================] - 10s - loss: 0.4515 - acc: 0.8547
19584/20000 [============================>.] - ETA: 0s
Epoch 1/6
40000/40000 [==============================] - 11s - loss: 0.8968 - acc: 0.6983
Epoch 2/6
40000/40000 [==============================] - 10s - loss: 0.5692 - acc: 0.8130
Epoch 3/6
40000/40000 [==============================] - 10s - loss: 0.4600 - acc: 0.8494
Epoch 4/6
40000/40000 [==============================] - 10s - loss: 0.4091 - acc: 0.8694
Epoch 5/6
40000/40000 [==============================] - 10s - loss: 0.3717 - acc: 0.8790
Epoch 6/6
40000/40000 [==============================] - 10s - loss: 0.3461 - acc: 0.8898
20000/20000 [==============================] - 1s
Epoch 1/6
40000/40000 [==============================] - 11s - loss: 0.8089 - acc: 0.7310
Epoch 2/6
40000/40000 [==============================] - 10s - loss: 0.4770 - acc: 0.8498
Epoch 3/6
40000/40000 [==============================] - 10s - loss: 0.4086 - acc: 0.8704
Epoch 4/6
40000/40000 [==============================] - 10s - loss: 0.3657 - acc: 0.8860
Epoch 5/6
40000/40000 [==============================] - 10s - loss: 0.3383 - acc: 0.8938
Epoch 6/6
40000/40000 [==============================] - 10s - loss: 0.3164 - acc: 0.9027
19520/20000 [============================>.] - ETA: 0s
Epoch 1/6
40000/40000 [==============================] - 11s - loss: 0.8393 - acc: 0.7214
Epoch 2/6
40000/40000 [==============================] - 10s - loss: 0.5132 - acc: 0.8379
Epoch 3/6
40000/40000 [==============================] - 10s - loss: 0.4331 - acc: 0.8635
Epoch 4/6
40000/40000 [==============================] - 10s - loss: 0.3813 - acc: 0.8808
Epoch 5/6
40000/40000 [==============================] - 10s - loss: 0.3530 - acc: 0.8902
Epoch 6/6
40000/40000 [==============================] - 10s - loss: 0.3278 - acc: 0.8986
19936/20000 [============================>.] - ETA: 0s
Epoch 1/3
40000/40000 [==============================] - 11s - loss: 0.5975 - acc: 0.8099
Epoch 2/3
40000/40000 [==============================] - 10s - loss: 0.3181 - acc: 0.9048
Epoch 3/3
40000/40000 [==============================] - 10s - loss: 0.2673 - acc: 0.9199
19808/20000 [============================>.] - ETA: 0s
Epoch 1/3
40000/40000 [==============================] - 11s - loss: 0.6155 - acc: 0.8040
Epoch 2/3
40000/40000 [==============================] - 11s - loss: 0.3500 - acc: 0.8951
Epoch 3/3
40000/40000 [==============================] - 11s - loss: 0.2864 - acc: 0.9156
20000/20000 [==============================] - 1s
Epoch 1/3
40000/40000 [==============================] - 11s - loss: 0.7519 - acc: 0.7560
Epoch 2/3
40000/40000 [==============================] - 10s - loss: 0.4660 - acc: 0.8580
Epoch 3/3
40000/40000 [==============================] - 10s - loss: 0.3553 - acc: 0.8936
19776/20000 [============================>.] - ETA: 0s
Epoch 1/6
40000/40000 [==============================] - 11s - loss: 0.5869 - acc: 0.8162
Epoch 2/6
40000/40000 [==============================] - 11s - loss: 0.3279 - acc: 0.9014
Epoch 3/6
40000/40000 [==============================] - 11s - loss: 0.2725 - acc: 0.9187
Epoch 4/6
40000/40000 [==============================] - 11s - loss: 0.2366 - acc: 0.9291
Epoch 5/6
40000/40000 [==============================] - 11s - loss: 0.2102 - acc: 0.9386
Epoch 6/6
40000/40000 [==============================] - 16s - loss: 0.1954 - acc: 0.9423
19840/20000 [============================>.] - ETA: 0s
Epoch 1/6
40000/40000 [==============================] - 11s - loss: 0.5526 - acc: 0.8262
Epoch 2/6
40000/40000 [==============================] - 11s - loss: 0.2903 - acc: 0.9142
Epoch 3/6
40000/40000 [==============================] - 11s - loss: 0.2361 - acc: 0.9302
Epoch 4/6
40000/40000 [==============================] - 11s - loss: 0.2064 - acc: 0.9396
Epoch 5/6
40000/40000 [==============================] - 10s - loss: 0.1886 - acc: 0.9443
Epoch 6/6
40000/40000 [==============================] - 10s - loss: 0.1755 - acc: 0.9496
19808/20000 [============================>.] - ETA: 0s
Epoch 1/6
40000/40000 [==============================] - 11s - loss: 0.7275 - acc: 0.7677
Epoch 2/6
40000/40000 [==============================] - 10s - loss: 0.4141 - acc: 0.8772
Epoch 3/6
40000/40000 [==============================] - 10s - loss: 0.3136 - acc: 0.9056
Epoch 4/6
40000/40000 [==============================] - 10s - loss: 0.2651 - acc: 0.9210
Epoch 5/6
40000/40000 [==============================] - 10s - loss: 0.2363 - acc: 0.9306
Epoch 6/6
40000/40000 [==============================] - 10s - loss: 0.2092 - acc: 0.9380
19552/20000 [============================>.] - ETA: 0s
Epoch 1/3
40000/40000 [==============================] - 12s - loss: 0.7849 - acc: 0.7334
Epoch 2/3
40000/40000 [==============================] - 11s - loss: 0.4506 - acc: 0.8587
Epoch 3/3
40000/40000 [==============================] - 11s - loss: 0.3741 - acc: 0.8813
19872/20000 [============================>.] - ETA: 0s
Epoch 1/3
40000/40000 [==============================] - 12s - loss: 0.8744 - acc: 0.7068
Epoch 2/3
40000/40000 [==============================] - 11s - loss: 0.5231 - acc: 0.8312
Epoch 3/3
40000/40000 [==============================] - 11s - loss: 0.4305 - acc: 0.8635
19552/20000 [============================>.] - ETA: 0s
Epoch 1/3
40000/40000 [==============================] - 12s - loss: 0.7567 - acc: 0.7473
Epoch 2/3
40000/40000 [==============================] - 11s - loss: 0.4200 - acc: 0.8685
Epoch 3/3
40000/40000 [==============================] - 11s - loss: 0.3604 - acc: 0.8887
19712/20000 [============================>.] - ETA: 0s
Epoch 1/6
40000/40000 [==============================] - 12s - loss: 0.7111 - acc: 0.7676
Epoch 2/6
40000/40000 [==============================] - 11s - loss: 0.4243 - acc: 0.8669
Epoch 3/6
40000/40000 [==============================] - 11s - loss: 0.3638 - acc: 0.8873
Epoch 4/6
40000/40000 [==============================] - 11s - loss: 0.3223 - acc: 0.8995
Epoch 5/6
40000/40000 [==============================] - 11s - loss: 0.2994 - acc: 0.9073
Epoch 6/6
40000/40000 [==============================] - 11s - loss: 0.2823 - acc: 0.9135
20000/20000 [==============================] - 2s
Epoch 1/6
40000/40000 [==============================] - 12s - loss: 0.7588 - acc: 0.7513
Epoch 2/6
40000/40000 [==============================] - 11s - loss: 0.4568 - acc: 0.8570
Epoch 3/6
40000/40000 [==============================] - 12s - loss: 0.3757 - acc: 0.8819
Epoch 4/6
40000/40000 [==============================] - 11s - loss: 0.3256 - acc: 0.8969
Epoch 5/6
40000/40000 [==============================] - 11s - loss: 0.2996 - acc: 0.9060
Epoch 6/6
40000/40000 [==============================] - 11s - loss: 0.2702 - acc: 0.9146
19904/20000 [============================>.] - ETA: 0s
Epoch 1/6
40000/40000 [==============================] - 12s - loss: 0.7798 - acc: 0.7464
Epoch 2/6
40000/40000 [==============================] - 11s - loss: 0.4625 - acc: 0.8571
Epoch 3/6
40000/40000 [==============================] - 11s - loss: 0.3869 - acc: 0.8814
Epoch 4/6
40000/40000 [==============================] - 11s - loss: 0.3429 - acc: 0.8959
Epoch 5/6
40000/40000 [==============================] - 11s - loss: 0.3143 - acc: 0.9035
Epoch 6/6
40000/40000 [==============================] - 11s - loss: 0.2889 - acc: 0.9122
19840/20000 [============================>.] - ETA: 0s
Epoch 1/3
40000/40000 [==============================] - 13s - loss: 0.5828 - acc: 0.8161
Epoch 2/3
40000/40000 [==============================] - 11s - loss: 0.3009 - acc: 0.9099
Epoch 3/3
40000/40000 [==============================] - 11s - loss: 0.2393 - acc: 0.9291
19680/20000 [============================>.] - ETA: 0s
Epoch 1/3
40000/40000 [==============================] - 12s - loss: 0.5584 - acc: 0.8246
Epoch 2/3
40000/40000 [==============================] - 12s - loss: 0.2862 - acc: 0.9152
Epoch 3/3
40000/40000 [==============================] - 11s - loss: 0.2334 - acc: 0.9319
19488/20000 [============================>.] - ETA: 0s
Epoch 1/3
40000/40000 [==============================] - 13s - loss: 0.6253 - acc: 0.8020
Epoch 2/3
40000/40000 [==============================] - 11s - loss: 0.3054 - acc: 0.9093
Epoch 3/3
40000/40000 [==============================] - 12s - loss: 0.2463 - acc: 0.9278
19808/20000 [============================>.] - ETA: 0s
Epoch 1/6
40000/40000 [==============================] - 13s - loss: 0.5753 - acc: 0.8200
Epoch 2/6
40000/40000 [==============================] - 12s - loss: 0.2827 - acc: 0.9170
Epoch 3/6
40000/40000 [==============================] - 11s - loss: 0.2217 - acc: 0.9339
Epoch 4/6
40000/40000 [==============================] - 11s - loss: 0.1863 - acc: 0.9455
Epoch 5/6
40000/40000 [==============================] - 12s - loss: 0.1663 - acc: 0.9516
Epoch 6/6
40000/40000 [==============================] - 12s - loss: 0.1535 - acc: 0.9550
19680/20000 [============================>.] - ETA: 0s
Epoch 1/6
40000/40000 [==============================] - 13s - loss: 0.5670 - acc: 0.8247
Epoch 2/6
40000/40000 [==============================] - 12s - loss: 0.2728 - acc: 0.9204
Epoch 3/6
40000/40000 [==============================] - 12s - loss: 0.2134 - acc: 0.9383
Epoch 4/6
40000/40000 [==============================] - 12s - loss: 0.1890 - acc: 0.9459
Epoch 5/6
40000/40000 [==============================] - 12s - loss: 0.1695 - acc: 0.9501
Epoch 6/6
40000/40000 [==============================] - 12s - loss: 0.1570 - acc: 0.9535
19712/20000 [============================>.] - ETA: 0s
Epoch 1/6
40000/40000 [==============================] - 13s - loss: 0.6227 - acc: 0.7986
Epoch 2/6
40000/40000 [==============================] - 12s - loss: 0.3322 - acc: 0.9007
Epoch 3/6
40000/40000 [==============================] - 12s - loss: 0.2469 - acc: 0.9258
Epoch 4/6
40000/40000 [==============================] - 12s - loss: 0.2029 - acc: 0.9409
Epoch 5/6
40000/40000 [==============================] - 12s - loss: 0.1748 - acc: 0.9496
Epoch 6/6
40000/40000 [==============================] - 12s - loss: 0.1558 - acc: 0.9542
19872/20000 [============================>.] - ETA: 0s
Epoch 1/6
60000/60000 [==============================] - 19s - loss: 0.4922 - acc: 0.8482
Epoch 2/6
60000/60000 [==============================] - 24s - loss: 0.2342 - acc: 0.9318
Epoch 3/6
60000/60000 [==============================] - 24s - loss: 0.1843 - acc: 0.9485
Epoch 4/6
60000/60000 [==============================] - 25s - loss: 0.1556 - acc: 0.9549
Epoch 5/6
60000/60000 [==============================] - 24s - loss: 0.1450 - acc: 0.9581
Epoch 6/6
60000/60000 [==============================] - 25s - loss: 0.1312 - acc: 0.9624
The parameters of the best model are:
{'nb_conv': 3, 'nb_epoch': 6, 'nb_pool': 2, 'dense_layer_sizes': [64, 64], 'nb_filters': 8}
# validator.best_estimator_ returns the sklearn-wrapped version of the best model
# validator.best_estimator_.model returns the unwrapped (pure Keras) best model
best_model = validator.best_estimator_.model
# names of the metrics
metric_names = best_model.metrics_names
# metric_names = ['loss', 'acc']
# values of the metrics
metric_values = best_model.evaluate(X_test, y_test)
# metric_values = [0.0550, 0.9826]
print()
for metric, value in zip(metric_names, metric_values):
    print(metric, ': ', value)
9984/10000 [============================>.] - ETA: 0s
loss : 0.0550105490824
acc : 0.9826
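The wrapped best estimator can also be used through the plain scikit-learn prediction API (a sketch; GridSearchCV.predict delegates to the refit best estimator, and KerasClassifier returns integer class labels rather than one-hot vectors):
# predict digit labels for the first five test images via the sklearn interface
predicted_classes = validator.predict(X_test[:5])
print(predicted_classes)  # one predicted digit per image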