Deeplab v3+中的骨干模型resnet(加入atrous)的源码解析,以及普通resnet整个结构的构建过程
加入带洞卷积的resnet结构的构建,以及普通resnet如何通过模块的组合来堆砌深层卷积网络。
第一段代码为deeplab v3+(pytorch版本)中的基本模型改进版resnet的构建过程,
第二段代码为model的全部结构图示,以文字的方式表示;forward过程并未显示在其中
import math

import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo

from modeling.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d


class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce -> 3x3 (optionally atrous) -> 1x1 expand.

    The different ResNet stages are built by stacking this block with
    different parameters; ``dilation`` > 1 turns the 3x3 convolution into an
    atrous (dilated) convolution.
    """

    # Channel expansion factor: the block outputs planes * 4 channels.
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, BatchNorm=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = BatchNorm(planes)
        # padding == dilation keeps the 3x3 conv's spatial size unchanged
        # (at stride 1) for any dilation rate.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               dilation=dilation, padding=dilation, bias=False)
        self.bn2 = BatchNorm(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = BatchNorm(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        # Optional 1x1 conv + BN projection that matches the residual's shape
        # when stride != 1 or the channel count changes.
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation

    def forward(self, x):
        """Run the three convolutions and add the (possibly projected) residual."""
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out


class ResNet(nn.Module):
    """ResNet backbone with atrous convolutions (DeepLab v3+ style).

    Uses dilated convolutions in the later stages to enlarge the receptive
    field and capture multi-scale context without further downsampling.
    """

    def __init__(self, block, layers, output_stride, BatchNorm, pretrained=True):
        """Build the stem plus four stages of residual blocks.

        Args:
            block: residual block class (here: Bottleneck).
            layers: number of blocks per stage, e.g. [3, 4, 23, 3] for ResNet-101.
            output_stride: ratio of input to output spatial resolution (8 or 16).
            BatchNorm: normalization layer class (nn.BatchNorm2d or
                SynchronizedBatchNorm2d).
            pretrained: if True, load ImageNet-pretrained weights.
        """
        self.inplanes = 64
        super(ResNet, self).__init__()
        # Multi-grid dilation multipliers for the three blocks of layer4.
        blocks = [1, 2, 4]
        if output_stride == 16:
            strides = [1, 2, 2, 1]
            dilations = [1, 1, 1, 2]
        elif output_stride == 8:
            strides = [1, 2, 1, 1]
            dilations = [1, 1, 2, 4]
        else:
            raise NotImplementedError

        # Stem: 7x7 conv (stride 2) + BN + ReLU + 3x3 max-pool (stride 2).
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = BatchNorm(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # The four stages, built from stacks of bottleneck blocks.
        self.layer1 = self._make_layer(block, 64, layers[0], stride=strides[0], dilation=dilations[0], BatchNorm=BatchNorm)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=strides[1], dilation=dilations[1], BatchNorm=BatchNorm)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=strides[2], dilation=dilations[2], BatchNorm=BatchNorm)
        # layer4 uses the multi-grid unit instead of a plain layer.
        self.layer4 = self._make_MG_unit(block, 512, blocks=blocks, stride=strides[3], dilation=dilations[3], BatchNorm=BatchNorm)
        # self.layer4 = self._make_layer(block, 512, layers[3], stride=strides[3], dilation=dilations[3], BatchNorm=BatchNorm)
        self._init_weight()

        if pretrained:
            self._load_pretrained_model()

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1, BatchNorm=None):
        """Stack ``blocks`` bottleneck modules for one stage.

        Args:
            block: the bottleneck class.
            planes: base channel width of the stage (64/128/256/512).
            blocks: how many identical blocks to stack (the entries of the
                `layers` list, e.g. [3, 4, 23, 3]).
            stride: stride of the first block (downsamples the stage input).
            dilation: atrous rate; 1 means ordinary convolution.
        """
        downsample = None
        # Project the residual when the first block changes resolution or width.
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                BatchNorm(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, dilation, downsample, BatchNorm))
        self.inplanes = planes * block.expansion
        # Remaining blocks keep stride 1 and the same dilation.
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation, BatchNorm=BatchNorm))

        return nn.Sequential(*layers)

    def _make_MG_unit(self, block, planes, blocks, stride=1, dilation=1, BatchNorm=None):
        """Build the multi-grid unit for layer4.

        Each block i uses dilation ``blocks[i] * dilation`` (e.g. [1, 2, 4]
        multipliers), following the multi-grid scheme of DeepLab v3.
        """
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                BatchNorm(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, dilation=blocks[0]*dilation,
                            downsample=downsample, BatchNorm=BatchNorm))
        self.inplanes = planes * block.expansion
        for i in range(1, len(blocks)):
            layers.append(block(self.inplanes, planes, stride=1,
                                dilation=blocks[i]*dilation, BatchNorm=BatchNorm))

        return nn.Sequential(*layers)

    def forward(self, input):
        """Return (high-level features, low-level features from layer1)."""
        x = self.conv1(input)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        # layer1 output is kept as the low-level feature for the decoder.
        low_level_feat = x
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x, low_level_feat

    def _init_weight(self):
        """He-style init for convolutions; BN weights to 1, biases to 0."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, SynchronizedBatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _load_pretrained_model(self):
        # Fetch (or read from the torch cache) the ImageNet-pretrained
        # ResNet-101 weights.  The original snippet loaded a hard-coded local
        # resnet50 checkpoint, which does not match the [3, 4, 23, 3] layout
        # this backbone is built with.
        pretrain_dict = model_zoo.load_url('https://download.pytorch.org/models/resnet101-5d3b4d8f.pth')
        model_dict = {}
        state_dict = self.state_dict()
        # Keep only parameters whose names exist in this model (e.g. the
        # original classifier's fc layer is dropped).
        for k, v in pretrain_dict.items():
            if k in state_dict:
                model_dict[k] = v
        state_dict.update(model_dict)
        self.load_state_dict(state_dict)


def ResNet101(output_stride, BatchNorm, pretrained=True):
    """Constructs a ResNet-101 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    # [3, 4, 23, 3] is the per-stage block count that defines ResNet-101;
    # other counts yield ResNet-50 or deeper variants.
    model = ResNet(Bottleneck, [3, 4, 23, 3], output_stride, BatchNorm, pretrained=pretrained)
    return model


if __name__ == "__main__":
    model = ResNet101(BatchNorm=nn.BatchNorm2d, pretrained=True, output_stride=8)
    print(model)  # print the structure to inspect how the model is assembled
    input = torch.rand(1, 3, 512, 512)
    output, low_level_feat = model(input)
    print(output.size())
    print(low_level_feat.size())
打印出的model结构如下:
/home/huihua/anaconda3/bin/python /home/huihua/PycharmProjects/untitled/pytorch-deeplab-xception-master/modeling/backbone/resnet.py
ResNet(
  (conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
  (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (relu): ReLU(inplace)
  (maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
(layer1): Sequential(
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
(downsample): Sequential(
(): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
)
(layer2): Sequential(
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
(downsample): Sequential(
(): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
)
(layer3): Sequential(
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
(downsample): Sequential(
(): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
)
(layer4): Sequential(
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
(downsample): Sequential(
(): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
)
)
torch.Size([1, 2048, 64, 64])
torch.Size([1, 256, 128, 128])

Process finished with exit code 0
Deeplab v3+中的骨干模型resnet(加入atrous)的源码解析,以及普通resnet整个结构的构建过程的更多相关文章
- Java生鲜电商平台-电商中海量搜索ElasticSearch架构设计实战与源码解析
Java生鲜电商平台-电商中海量搜索ElasticSearch架构设计实战与源码解析 生鲜电商搜索引擎的特点 众所周知,标准的搜索引擎主要分成三个大的部分,第一步是爬虫系统,第二步是数据分析,第三步才 ...
- Java中的容器(集合)之HashMap源码解析
1.HashMap源码解析(JDK8) 基础原理: 对比上一篇<Java中的容器(集合)之ArrayList源码解析>而言,本篇只解析HashMap常用的核心方法的源码. HashMap是 ...
- MapReduce中一次reduce方法的调用中key的值不断变化分析及源码解析
摘要:mapreduce中执行reduce(KEYIN key, Iterable<VALUEIN> values, Context context),调用一次reduce方法,迭代val ...
- 神经网络中 BP 算法的原理与 Python 实现源码解析
最近这段时间系统性的学习了 BP 算法后写下了这篇学习笔记,因为能力有限,若有明显错误,还请指正. 什么是梯度下降和链式求导法则 假设我们有一个函数 J(w),如下图所示. 梯度下降示意图 现在,我们 ...
- Java中的容器(集合)之ArrayList源码解析
1.ArrayList源码解析 源码解析: 如下源码来自JDK8(如需查看ArrayList扩容源码解析请跳转至<Java中的容器(集合)>第十条):. package java.util ...
- 关于原生js中函数的三种角色和jQuery源码解析
原生js中的函数有三种角色: 分两大种: 1.函数(最主要的角色)2.普通对象(辅助角色):函数也可以像对象一样设置属于本身的私有属性和方法,这些东西和实例或者私有变量没有关系两种角色直接没有必然的关 ...
- [源码解析] PyTorch分布式优化器(3)---- 模型并行
[源码解析] PyTorch分布式优化器(3)---- 模型并行 目录 [源码解析] PyTorch分布式优化器(3)---- 模型并行 0x00 摘要 0x01 前文回顾 0x02 单机模型 2.1 ...
- 量化交易中VWAP/TWAP算法的基本原理和简单源码实现(C++和python)(转)
量化交易中VWAP/TWAP算法的基本原理和简单源码实现(C++和python) 原文地址:http://blog.csdn.net/u012234115/article/details/728300 ...
- 谷歌BERT预训练源码解析(二):模型构建
目录前言源码解析模型配置参数BertModelword embeddingembedding_postprocessorTransformerself_attention模型应用前言BERT的模型主要 ...
随机推荐
- 解决ubuntu系统中firefox无法播放网页版音乐播放器音乐
Reference: https://blog.csdn.net/h736131708/article/details/80775382 因为网页版的qq音乐或者网易云音乐都把音频换成了AAC格式,这 ...
- 34对MyBatis的博客的整理心得
对本博客的mybatis重新读一下,做一个整理.如下: 1:为什么会有mybatis,因为原生的jdbc方式有很大问题: (1)数据库连接,使用时就创建,不使用立即释放,对数据库进行频繁连接开启和关闭 ...
- 【转】WPF自定义控件与样式(12)-缩略图ThumbnailImage /gif动画图/图片列表
一.前言 申明:WPF自定义控件与样式是一个系列文章,前后是有些关联的,但大多是按照由简到繁的顺序逐步发布的等,若有不明白的地方可以参考本系列前面的文章,文末附有部分文章链接. 本文主要针对WPF项目 ...
- 115、如何构建Android MVVM 应用框架(转载)
转载:http://android.jobbole.com/85198/
- Qt编写视频监控画面分割界面(开源)
其实qt应用在安防领域还是蛮多的,尤其是视频监控系统,但是网上几乎没有看到qt做的最基础的视频监控画面分割的demo,今天特意花几分钟提取出来,开源放出来.欢迎大家多多点赞!源码下载:点击打开链接 运 ...
- 微信小游戏下socket.io的使用
参考: 微信小游戏:socket.io 一 在微信小游戏 中使用socket.io报错 因为项目需求,后端要使用nodejs简单搭建一个服务器,通讯用json格式. 使用Egret提供的socket. ...
- array_rand
array_rand — 从数组中随机取出一个或多个单元 mixed array_rand ( array $array [, int $num = 1 ] ) 从数组中取出一个或多个随机的单元,并返 ...
- java web实现在cookie中保存用户名和密码,用户自动登入
<%@ page language="java" contentType="text/html; charset=UTF-8" pageEncoding= ...
- jquery怎么实现点击一个按钮控制一个div的显示和隐藏
示例html 1 2 <div class="abc" style="display:none"></div> <input ty ...
- 【转载】python抓取网页时候,判断网页编码格式
在web开发的时候我们经常会遇到网页抓取和分析,各种语言都可以完成这个功能.我喜欢用python实现,因为python提供了很多成熟的模块,可以很方便的实现网页抓取.但是在抓取过程中会遇到编码的问题, ...