While training a small classification network, I found that adding BatchNorm layers improved retrieval performance compared to the original setup, so I am recording the network structure here for future reference:
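As a quick refresher, BatchNorm normalizes each channel over the mini-batch and then applies a learned affine transform: y = γ · (x − μ_B) / sqrt(σ_B² + ε) + β, where μ_B and σ_B² are the batch mean and variance (replaced by moving averages at test time) and γ, β are learned parameters. Keep this split in mind when reading the prototxt below: Caffe implements the normalization and the affine transform as two separate layer types (see the note after the second prototxt).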

Before adding the layers:

layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  param {
    lr_mult:
    decay_mult:
  }
  param {
    lr_mult:
    decay_mult:
  }
  convolution_param {
    num_output:
    kernel_size:
    stride:
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value:
    }
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "conv1"
  top: "conv1"
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "conv1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size:
    stride:
  }
}
layer {
  name: "norm1"
  type: "LRN"
  bottom: "pool1"
  top: "norm1"
  lrn_param {
    local_size:
    alpha: 0.0001
    beta: 0.75
  }
}
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "norm1"
  top: "conv2"
  param {
    lr_mult:
    decay_mult:
  }
  param {
    lr_mult:
    decay_mult:
  }
  convolution_param {
    num_output:
    pad:
    kernel_size:
    stride:
    group:
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value:
    }
  }
}
layer {
  name: "relu2"
  type: "ReLU"
  bottom: "conv2"
  top: "conv2"
}
layer {
  name: "pool2"
  type: "Pooling"
  bottom: "conv2"
  top: "pool2"
  pooling_param {
    pool: MAX
    kernel_size:
    stride:
  }
}
layer {
  name: "norm2"
  type: "LRN"
  bottom: "pool2"
  top: "norm2"
  lrn_param {
    local_size:
    alpha: 0.0001
    beta: 0.75
  }
}
layer {
  name: "conv3"
  type: "Convolution"
  bottom: "norm2"
  top: "conv3"
  param {
    lr_mult:
    decay_mult:
  }
  param {
    lr_mult:
    decay_mult:
  }
  convolution_param {
    num_output:
    pad:
    stride:
    kernel_size:
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value:
    }
  }
}
layer {
  name: "relu3"
  type: "ReLU"
  bottom: "conv3"
  top: "conv3"
}
layer {
  name: "conv4"
  type: "Convolution"
  bottom: "conv3"
  top: "conv4"
  param {
    lr_mult:
    decay_mult:
  }
  param {
    lr_mult:
    decay_mult:
  }
  convolution_param {
    num_output:
    pad:
    kernel_size:
    group:
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value:
    }
  }
}
layer {
  name: "relu4"
  type: "ReLU"
  bottom: "conv4"
  top: "conv4"
}
layer {
  name: "conv5"
  type: "Convolution"
  bottom: "conv4"
  top: "conv5"
  param {
    lr_mult:
    decay_mult:
  }
  param {
    lr_mult:
    decay_mult:
  }
  convolution_param {
    num_output:
    pad:
    stride:
    kernel_size:
    group:
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value:
    }
  }
}
layer {
  name: "relu5"
  type: "ReLU"
  bottom: "conv5"
  top: "conv5"
}
layer {
  name: "pool5"
  type: "Pooling"
  bottom: "conv5"
  top: "pool5"
  pooling_param {
    pool: MAX
    kernel_size:
    stride:
  }
}
layer {
  name: "fc6_srx"
  type: "InnerProduct"
  bottom: "pool5"
  top: "fc6"
  param {
    lr_mult:
    decay_mult:
  }
  param {
    lr_mult:
    decay_mult:
  }
  inner_product_param {
    num_output:
    weight_filler {
      type: "gaussian"
      std: 0.005
    }
    bias_filler {
      type: "constant"
      value:
    }
  }
}
layer {
  name: "relu7"
  type: "ReLU"
  bottom: "fc6"
  top: "fc6"
}
layer {
  name: "drop7"
  type: "Dropout"
  bottom: "fc6"
  top: "fc6"
  dropout_param {
    dropout_ratio: 0.5
  }
}
layer {
  name: "fc7_srx"
  type: "InnerProduct"
  bottom: "fc6"
  top: "fc7"
  param {
    lr_mult:
    decay_mult:
  }
  param {
    lr_mult:
    decay_mult:
  }
  inner_product_param {
    num_output:
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value:
    }
  }
}
layer {
  name: "loss"
  type: "SoftmaxWithLoss"
  top: "SoftmaxWithLoss"
  bottom: "fc7"
  bottom: "label"
  include {
    phase: TRAIN
  }
}
layer {
  name: "accuracy"
  type: "Accuracy"
  bottom: "fc7"
  bottom: "label"
  top: "accuracy"
  include {
    phase: TEST
  }
}

After adding the layers (each BatchNorm layer is set off with ############## markers; note that the baseline's LRN layers norm1 and norm2 are removed, so conv2 and conv3 now read directly from pool1 and pool2):

layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  param {
    lr_mult:
    decay_mult:
  }
  param {
    lr_mult:
    decay_mult:
  }
  convolution_param {
    num_output:
    kernel_size:
    stride:
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value:
    }
  }
}
##############
layer {
  name: "bn1"
  type: "BatchNorm"
  bottom: "conv1"
  top: "conv1"
  param {
    lr_mult:
  }
  param {
    lr_mult:
  }
  param {
    lr_mult:
  }
}
##############
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "conv1"
  top: "conv1"
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "conv1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size:
    stride:
  }
}
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "pool1"
  top: "conv2"
  param {
    lr_mult:
    decay_mult:
  }
  param {
    lr_mult:
    decay_mult:
  }
  convolution_param {
    num_output:
    pad:
    kernel_size:
    stride:
    group:
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value:
    }
  }
}
##############
layer {
  name: "bn2"
  type: "BatchNorm"
  bottom: "conv2"
  top: "conv2"
  param {
    lr_mult:
  }
  param {
    lr_mult:
  }
  param {
    lr_mult:
  }
}
##############
layer {
  name: "relu2"
  type: "ReLU"
  bottom: "conv2"
  top: "conv2"
}
layer {
  name: "pool2"
  type: "Pooling"
  bottom: "conv2"
  top: "pool2"
  pooling_param {
    pool: MAX
    kernel_size:
    stride:
  }
}
layer {
  name: "conv3"
  type: "Convolution"
  bottom: "pool2"
  top: "conv3"
  param {
    lr_mult:
    decay_mult:
  }
  param {
    lr_mult:
    decay_mult:
  }
  convolution_param {
    num_output:
    pad:
    stride:
    kernel_size:
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value:
    }
  }
}
##############
layer {
  name: "bn3"
  type: "BatchNorm"
  bottom: "conv3"
  top: "conv3"
  param {
    lr_mult:
  }
  param {
    lr_mult:
  }
  param {
    lr_mult:
  }
}
##############
layer {
  name: "relu3"
  type: "ReLU"
  bottom: "conv3"
  top: "conv3"
}
layer {
  name: "conv4"
  type: "Convolution"
  bottom: "conv3"
  top: "conv4"
  param {
    lr_mult:
    decay_mult:
  }
  param {
    lr_mult:
    decay_mult:
  }
  convolution_param {
    num_output:
    pad:
    kernel_size:
    group:
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value:
    }
  }
}
##############
layer {
  name: "bn4"
  type: "BatchNorm"
  bottom: "conv4"
  top: "conv4"
  param {
    lr_mult:
  }
  param {
    lr_mult:
  }
  param {
    lr_mult:
  }
}
##############
layer {
  name: "relu4"
  type: "ReLU"
  bottom: "conv4"
  top: "conv4"
}
layer {
  name: "conv5"
  type: "Convolution"
  bottom: "conv4"
  top: "conv5"
  param {
    lr_mult:
    decay_mult:
  }
  param {
    lr_mult:
    decay_mult:
  }
  convolution_param {
    num_output:
    pad:
    stride:
    kernel_size:
    group:
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value:
    }
  }
}
##############
layer {
  name: "bn5"
  type: "BatchNorm"
  bottom: "conv5"
  top: "conv5"
  param {
    lr_mult:
  }
  param {
    lr_mult:
  }
  param {
    lr_mult:
  }
}
##############
layer {
  name: "relu5"
  type: "ReLU"
  bottom: "conv5"
  top: "conv5"
}
layer {
  name: "pool5"
  type: "Pooling"
  bottom: "conv5"
  top: "pool5"
  pooling_param {
    pool: MAX
    kernel_size:
    stride:
  }
}
layer {
  name: "fc6_srx"
  type: "InnerProduct"
  bottom: "pool5"
  top: "fc6"
  param {
    lr_mult:
    decay_mult:
  }
  param {
    lr_mult:
    decay_mult:
  }
  inner_product_param {
    num_output:
    weight_filler {
      type: "gaussian"
      std: 0.005
    }
    bias_filler {
      type: "constant"
      value:
    }
  }
}
layer {
  name: "relu7"
  type: "ReLU"
  bottom: "fc6"
  top: "fc6"
}
layer {
  name: "drop7"
  type: "Dropout"
  bottom: "fc6"
  top: "fc6"
  dropout_param {
    dropout_ratio: 0.5
  }
}
layer {
  name: "fc7_srx"
  type: "InnerProduct"
  bottom: "fc6"
  top: "fc7"
  param {
    lr_mult:
    decay_mult:
  }
  param {
    lr_mult:
    decay_mult:
  }
  inner_product_param {
    num_output:
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value:
    }
  }
}
layer {
  name: "loss"
  type: "SoftmaxWithLoss"
  top: "SoftmaxWithLoss"
  bottom: "fc7"
  bottom: "label"
  include {
    phase: TRAIN
  }
}
layer {
  name: "accuracy"
  type: "Accuracy"
  bottom: "fc7"
  bottom: "label"
  top: "accuracy"
  include {
    phase: TEST
  }
}
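One caveat about the BatchNorm layers above: in standard BVLC Caffe, the BatchNorm layer only performs the normalization step; the learnable scale and shift (γ and β) live in a separate Scale layer. The three param blocks in each bn layer correspond to BatchNorm's three internal blobs (mean, variance, and the moving-average factor), which are accumulated during training rather than learned by the solver, which is why their lr_mult is conventionally set to 0. Below is a minimal sketch of the usual BatchNorm + Scale pairing with typical values filled in; the layer and blob names (bnX, scaleX, convX) are placeholders, not values from my network:

# Sketch only: the canonical BatchNorm + Scale pair in BVLC Caffe.
# "convX" stands for whatever blob the preceding convolution produced.
layer {
  name: "bnX"
  type: "BatchNorm"
  bottom: "convX"
  top: "convX"
  # The three internal blobs (mean, variance, moving-average factor)
  # are accumulated, not learned, so freeze them for the solver:
  param { lr_mult: 0 }
  param { lr_mult: 0 }
  param { lr_mult: 0 }
  # Recent BVLC Caffe picks use_global_stats automatically by phase
  # (false in TRAIN, true in TEST); older builds may need it set explicitly.
}
layer {
  name: "scaleX"
  type: "Scale"
  bottom: "convX"
  top: "convX"
  scale_param {
    bias_term: true  # learn gamma (scale) and beta (shift)
  }
}

The network above trains fine without the Scale layer, but it then lacks the learnable γ/β of the original BatchNorm formulation; adding the Scale layer is cheap and usually worth trying.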
