PyTorch Implementation of GoogLeNet
1. Original paper
Going deeper with convolutions
2. Abstract
We propose a deep convolutional neural network architecture codenamed Inception, which achieved the state of the art for classification and detection in ILSVRC 2014. The main hallmark of this architecture is the improved utilization of computing resources inside the network. This is achieved by a carefully crafted design that allows the depth and width of the network to be increased while keeping the computational budget constant. To optimize quality, the architectural decisions were based on the Hebbian principle and the intuition of multi-scale processing. One particular incarnation, submitted to ILSVRC 2014 and called GoogLeNet, is a 22-layer deep network whose quality is assessed in the context of classification and detection.
3. Network structure
(Architecture figures from the paper: the overall GoogLeNet network and the Inception module. The full layer configuration can also be read from the implementation below.)
4. PyTorch implementation
import warnings
from collections import namedtuple

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.model_zoo import load_url as load_state_dict_from_url
from torchsummary import summary

__all__ = ['GoogLeNet', 'googlenet']

model_urls = {
    # GoogLeNet ported from TensorFlow
    'googlenet': 'https://download.pytorch.org/models/googlenet-1378be20.pth',
}

_GoogLeNetOuputs = namedtuple('GoogLeNetOuputs', ['logits', 'aux_logits2', 'aux_logits1'])


def googlenet(pretrained=False, progress=True, **kwargs):
    r"""GoogLeNet (Inception v1) model architecture from
    `"Going Deeper with Convolutions" <http://arxiv.org/abs/1409.4842>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        aux_logits (bool): If True, adds two auxiliary branches that can improve training.
            Default: *False* when pretrained is True otherwise *True*
        transform_input (bool): If True, preprocesses the input according to the method with which it
            was trained on ImageNet. Default: *False*
    """
    if pretrained:
        if 'transform_input' not in kwargs:
            kwargs['transform_input'] = True
        if 'aux_logits' not in kwargs:
            kwargs['aux_logits'] = False
        if kwargs['aux_logits']:
            warnings.warn('auxiliary heads in the pretrained googlenet model are NOT pretrained, '
                          'so make sure to train them')
        original_aux_logits = kwargs['aux_logits']
        # Build the model with both aux heads so the pretrained state_dict matches,
        # then strip them again if they were not requested.
        kwargs['aux_logits'] = True
        kwargs['init_weights'] = False
        model = GoogLeNet(**kwargs)
        state_dict = load_state_dict_from_url(model_urls['googlenet'],
                                              progress=progress)
        model.load_state_dict(state_dict)
        if not original_aux_logits:
            model.aux_logits = False
            del model.aux1, model.aux2
        return model

    return GoogLeNet(**kwargs)


class GoogLeNet(nn.Module):

    def __init__(self, num_classes=1000, aux_logits=True, transform_input=False, init_weights=True):
        super(GoogLeNet, self).__init__()
        self.aux_logits = aux_logits
        self.transform_input = transform_input

        self.conv1 = BasicConv2d(3, 64, kernel_size=7, stride=2, padding=3)
        self.maxpool1 = nn.MaxPool2d(3, stride=2, ceil_mode=True)  # ceil_mode=True rounds the output size up
        self.conv2 = BasicConv2d(64, 64, kernel_size=1)
        self.conv3 = BasicConv2d(64, 192, kernel_size=3, padding=1)
        self.maxpool2 = nn.MaxPool2d(3, stride=2, ceil_mode=True)

        self.inception3a = Inception(192, 64, 96, 128, 16, 32, 32)
        self.inception3b = Inception(256, 128, 128, 192, 32, 96, 64)
        self.maxpool3 = nn.MaxPool2d(3, stride=2, ceil_mode=True)

        self.inception4a = Inception(480, 192, 96, 208, 16, 48, 64)
        self.inception4b = Inception(512, 160, 112, 224, 24, 64, 64)
        self.inception4c = Inception(512, 128, 128, 256, 24, 64, 64)
        self.inception4d = Inception(512, 112, 144, 288, 32, 64, 64)
        self.inception4e = Inception(528, 256, 160, 320, 32, 128, 128)
        self.maxpool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)

        self.inception5a = Inception(832, 256, 160, 320, 32, 128, 128)
        self.inception5b = Inception(832, 384, 192, 384, 48, 128, 128)

        if aux_logits:
            self.aux1 = InceptionAux(512, num_classes)
            self.aux2 = InceptionAux(528, num_classes)

        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.dropout = nn.Dropout(0.2)
        self.fc = nn.Linear(1024, num_classes)

        if init_weights:
            self._initialize_weights()

    def _initialize_weights(self):
        # Truncated-normal initialization for conv/linear weights, constants for BatchNorm
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
                import scipy.stats as stats
                X = stats.truncnorm(-2, 2, scale=0.01)
                values = torch.as_tensor(X.rvs(m.weight.numel()), dtype=m.weight.dtype)
                values = values.view(m.weight.size())
                with torch.no_grad():
                    m.weight.copy_(values)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        if self.transform_input:
            x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
            x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
            x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
            x = torch.cat((x_ch0, x_ch1, x_ch2), 1)

        # N x 3 x 224 x 224
        x = self.conv1(x)
        # N x 64 x 112 x 112
        x = self.maxpool1(x)
        # N x 64 x 56 x 56
        x = self.conv2(x)
        # N x 64 x 56 x 56
        x = self.conv3(x)
        # N x 192 x 56 x 56
        x = self.maxpool2(x)
        # N x 192 x 28 x 28
        x = self.inception3a(x)
        # N x 256 x 28 x 28
        x = self.inception3b(x)
        # N x 480 x 28 x 28
        x = self.maxpool3(x)
        # N x 480 x 14 x 14
        x = self.inception4a(x)
        # N x 512 x 14 x 14
        if self.training and self.aux_logits:
            aux1 = self.aux1(x)

        x = self.inception4b(x)
        # N x 512 x 14 x 14
        x = self.inception4c(x)
        # N x 512 x 14 x 14
        x = self.inception4d(x)
        # N x 528 x 14 x 14
        if self.training and self.aux_logits:
            aux2 = self.aux2(x)

        x = self.inception4e(x)
        # N x 832 x 14 x 14
        x = self.maxpool4(x)
        # N x 832 x 7 x 7
        x = self.inception5a(x)
        # N x 832 x 7 x 7
        x = self.inception5b(x)
        # N x 1024 x 7 x 7
        x = self.avgpool(x)
        # N x 1024 x 1 x 1
        x = x.view(x.size(0), -1)
        # N x 1024
        x = self.dropout(x)
        x = self.fc(x)
        # N x 1000 (num_classes)
        if self.training and self.aux_logits:
            return _GoogLeNetOuputs(x, aux2, aux1)
        return x


class Inception(nn.Module):  # Inception module

    def __init__(self, in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj):
        super(Inception, self).__init__()

        self.branch1 = BasicConv2d(in_channels, ch1x1, kernel_size=1)

        self.branch2 = nn.Sequential(
            BasicConv2d(in_channels, ch3x3red, kernel_size=1),
            BasicConv2d(ch3x3red, ch3x3, kernel_size=3, padding=1)
        )

        self.branch3 = nn.Sequential(
            BasicConv2d(in_channels, ch5x5red, kernel_size=1),
            BasicConv2d(ch5x5red, ch5x5, kernel_size=3, padding=1)
        )

        self.branch4 = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1, ceil_mode=True),
            BasicConv2d(in_channels, pool_proj, kernel_size=1)
        )

    def forward(self, x):
        branch1 = self.branch1(x)
        branch2 = self.branch2(x)
        branch3 = self.branch3(x)
        branch4 = self.branch4(x)

        # Concatenate the four branches along the channel dimension
        outputs = [branch1, branch2, branch3, branch4]
        return torch.cat(outputs, 1)


class InceptionAux(nn.Module):  # auxiliary classifier branch

    def __init__(self, in_channels, num_classes):
        super(InceptionAux, self).__init__()
        self.conv = BasicConv2d(in_channels, 128, kernel_size=1)

        self.fc1 = nn.Linear(2048, 1024)
        self.fc2 = nn.Linear(1024, num_classes)

    def forward(self, x):
        # aux1: N x 512 x 14 x 14, aux2: N x 528 x 14 x 14
        x = F.adaptive_avg_pool2d(x, (4, 4))
        # aux1: N x 512 x 4 x 4, aux2: N x 528 x 4 x 4
        x = self.conv(x)
        # N x 128 x 4 x 4
        x = x.view(x.size(0), -1)
        # N x 2048
        x = F.relu(self.fc1(x), inplace=True)
        # N x 1024
        x = F.dropout(x, 0.7, training=self.training)
        # N x 1024
        x = self.fc2(x)
        # N x num_classes
        return x


class BasicConv2d(nn.Module):  # Conv2d + BN + ReLU

    def __init__(self, in_channels, out_channels, **kwargs):
        super(BasicConv2d, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
        self.bn = nn.BatchNorm2d(out_channels, eps=0.001)

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        return F.relu(x, inplace=True)


if __name__ == "__main__":
    model = googlenet()
    summary(model, (3, 224, 224), device="cpu")
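As a quick sanity check, the sketch below runs a dummy batch through the model. The tensor names, batch size, and optimizer-free training step are illustrative assumptions, not part of the original listing; the 0.3 weight on the auxiliary losses follows the training setup described in the paper. It shows that in training mode the forward pass returns the _GoogLeNetOuputs namedtuple, while evaluation mode returns a plain logits tensor.

import torch
import torch.nn as nn

# Assumes the GoogLeNet class from the listing above is in scope.
model = GoogLeNet(num_classes=1000, aux_logits=True)
criterion = nn.CrossEntropyLoss()

model.train()
images = torch.randn(2, 3, 224, 224)   # dummy batch of two 224x224 RGB images
labels = torch.randint(0, 1000, (2,))  # dummy class targets

outputs = model(images)                # _GoogLeNetOuputs(logits, aux_logits2, aux_logits1)
loss = (criterion(outputs.logits, labels)
        + 0.3 * criterion(outputs.aux_logits1, labels)
        + 0.3 * criterion(outputs.aux_logits2, labels))
loss.backward()

model.eval()
with torch.no_grad():
    logits = model(images)             # plain tensor in eval mode
print(logits.shape)                    # torch.Size([2, 1000])

The auxiliary classifiers only contribute to the training loss; at inference time they are discarded, which is why evaluation mode returns just the main logits.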
References
https://github.com/pytorch/vision/tree/master/torchvision/models