import paddle
import paddle.nn as nn


class ResidualBlock(nn.Layer):
    def __init__(self, in_channels, out_channels, stride=1, downsample=None):
        super(ResidualBlock, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2D(in_channels, out_channels, kernel_size=3, stride=stride, padding=1),
            nn.BatchNorm2D(out_channels),
            nn.ReLU())
        self.conv2 = nn.Sequential(
            nn.Conv2D(out_channels, out_channels, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2D(out_channels))
        self.downsample = downsample
        self.relu = nn.ReLU()
        self.out_channels = out_channels

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.conv2(out)
        if self.downsample:
            # Project the shortcut so it matches the main branch's shape
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out


class ResNet(nn.Layer):
    def __init__(self, block, layers, num_classes=1000):
        super(ResNet, self).__init__()
        self.inplanes = 64
        self.conv1 = nn.Sequential(
            nn.Conv2D(3, 64, kernel_size=7, stride=2, padding=3),
            nn.BatchNorm2D(64),
            nn.ReLU())
        self.maxpool = nn.MaxPool2D(kernel_size=3, stride=2, padding=1)
        self.layer0 = self._make_layer(block, 64, layers[0], stride=1)
        self.layer1 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer2 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer3 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2D(7, stride=1)
        # For a 256x256 input the pooled feature map is 512x2x2, i.e. 2048 features
        self.fc = nn.Linear(2048, num_classes)

    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes:
            # 1x1 convolution to match channel count and spatial size on the shortcut
            downsample = nn.Sequential(
                nn.Conv2D(self.inplanes, planes, kernel_size=1, stride=stride),
                nn.BatchNorm2D(planes),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.maxpool(x)
        x = self.layer0(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.avgpool(x)
        # x = x.view(x.size(0), -1)  # PyTorch equivalent
        x = paddle.reshape(x, [x.shape[0], -1])
        x = self.fc(x)
        return x


model = ResNet(ResidualBlock, [3, 4, 6, 3], num_classes=2)  # instantiate the model
paddle.Model(model).summary((-1, 3, 256, 256))
W0505 09:07:12.146911  5588 device_context.cc:447] Please NOTE: device: 0, GPU Compute Capability: 7.0, Driver API Version: 11.2, Runtime API Version: 10.1
W0505 09:07:12.151273 5588 device_context.cc:465] device: 0, cuDNN Version: 7.6.
----------------------------------------------------------------------------
Layer (type) Input Shape Output Shape Param #
============================================================================
Conv2D-1 [[1, 3, 256, 256]] [1, 64, 128, 128] 9,472
BatchNorm2D-1 [[1, 64, 128, 128]] [1, 64, 128, 128] 256
ReLU-1 [[1, 64, 128, 128]] [1, 64, 128, 128] 0
MaxPool2D-1 [[1, 64, 128, 128]] [1, 64, 64, 64] 0
Conv2D-2 [[1, 64, 64, 64]] [1, 64, 64, 64] 36,928
BatchNorm2D-2 [[1, 64, 64, 64]] [1, 64, 64, 64] 256
ReLU-2 [[1, 64, 64, 64]] [1, 64, 64, 64] 0
Conv2D-3 [[1, 64, 64, 64]] [1, 64, 64, 64] 36,928
BatchNorm2D-3 [[1, 64, 64, 64]] [1, 64, 64, 64] 256
ReLU-3 [[1, 64, 64, 64]] [1, 64, 64, 64] 0
ResidualBlock-1 [[1, 64, 64, 64]] [1, 64, 64, 64] 0
Conv2D-4 [[1, 64, 64, 64]] [1, 64, 64, 64] 36,928
BatchNorm2D-4 [[1, 64, 64, 64]] [1, 64, 64, 64] 256
ReLU-4 [[1, 64, 64, 64]] [1, 64, 64, 64] 0
Conv2D-5 [[1, 64, 64, 64]] [1, 64, 64, 64] 36,928
BatchNorm2D-5 [[1, 64, 64, 64]] [1, 64, 64, 64] 256
ReLU-5 [[1, 64, 64, 64]] [1, 64, 64, 64] 0
ResidualBlock-2 [[1, 64, 64, 64]] [1, 64, 64, 64] 0
Conv2D-6 [[1, 64, 64, 64]] [1, 64, 64, 64] 36,928
BatchNorm2D-6 [[1, 64, 64, 64]] [1, 64, 64, 64] 256
ReLU-6 [[1, 64, 64, 64]] [1, 64, 64, 64] 0
Conv2D-7 [[1, 64, 64, 64]] [1, 64, 64, 64] 36,928
BatchNorm2D-7 [[1, 64, 64, 64]] [1, 64, 64, 64] 256
ReLU-7 [[1, 64, 64, 64]] [1, 64, 64, 64] 0
ResidualBlock-3 [[1, 64, 64, 64]] [1, 64, 64, 64] 0
Conv2D-9 [[1, 64, 64, 64]] [1, 128, 32, 32] 73,856
BatchNorm2D-9 [[1, 128, 32, 32]] [1, 128, 32, 32] 512
ReLU-8 [[1, 128, 32, 32]] [1, 128, 32, 32] 0
Conv2D-10 [[1, 128, 32, 32]] [1, 128, 32, 32] 147,584
BatchNorm2D-10 [[1, 128, 32, 32]] [1, 128, 32, 32] 512
Conv2D-8 [[1, 64, 64, 64]] [1, 128, 32, 32] 8,320
BatchNorm2D-8 [[1, 128, 32, 32]] [1, 128, 32, 32] 512
ReLU-9 [[1, 128, 32, 32]] [1, 128, 32, 32] 0
ResidualBlock-4 [[1, 64, 64, 64]] [1, 128, 32, 32] 0
Conv2D-11 [[1, 128, 32, 32]] [1, 128, 32, 32] 147,584
BatchNorm2D-11 [[1, 128, 32, 32]] [1, 128, 32, 32] 512
ReLU-10 [[1, 128, 32, 32]] [1, 128, 32, 32] 0
Conv2D-12 [[1, 128, 32, 32]] [1, 128, 32, 32] 147,584
BatchNorm2D-12 [[1, 128, 32, 32]] [1, 128, 32, 32] 512
ReLU-11 [[1, 128, 32, 32]] [1, 128, 32, 32] 0
ResidualBlock-5 [[1, 128, 32, 32]] [1, 128, 32, 32] 0
Conv2D-13 [[1, 128, 32, 32]] [1, 128, 32, 32] 147,584
BatchNorm2D-13 [[1, 128, 32, 32]] [1, 128, 32, 32] 512
ReLU-12 [[1, 128, 32, 32]] [1, 128, 32, 32] 0
Conv2D-14 [[1, 128, 32, 32]] [1, 128, 32, 32] 147,584
BatchNorm2D-14 [[1, 128, 32, 32]] [1, 128, 32, 32] 512
ReLU-13 [[1, 128, 32, 32]] [1, 128, 32, 32] 0
ResidualBlock-6 [[1, 128, 32, 32]] [1, 128, 32, 32] 0
Conv2D-15 [[1, 128, 32, 32]] [1, 128, 32, 32] 147,584
BatchNorm2D-15 [[1, 128, 32, 32]] [1, 128, 32, 32] 512
ReLU-14 [[1, 128, 32, 32]] [1, 128, 32, 32] 0
Conv2D-16 [[1, 128, 32, 32]] [1, 128, 32, 32] 147,584
BatchNorm2D-16 [[1, 128, 32, 32]] [1, 128, 32, 32] 512
ReLU-15 [[1, 128, 32, 32]] [1, 128, 32, 32] 0
ResidualBlock-7 [[1, 128, 32, 32]] [1, 128, 32, 32] 0
Conv2D-18 [[1, 128, 32, 32]] [1, 256, 16, 16] 295,168
BatchNorm2D-18 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
ReLU-16 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
Conv2D-19 [[1, 256, 16, 16]] [1, 256, 16, 16] 590,080
BatchNorm2D-19 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
Conv2D-17 [[1, 128, 32, 32]] [1, 256, 16, 16] 33,024
BatchNorm2D-17 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
ReLU-17 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
ResidualBlock-8 [[1, 128, 32, 32]] [1, 256, 16, 16] 0
Conv2D-20 [[1, 256, 16, 16]] [1, 256, 16, 16] 590,080
BatchNorm2D-20 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
ReLU-18 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
Conv2D-21 [[1, 256, 16, 16]] [1, 256, 16, 16] 590,080
BatchNorm2D-21 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
ReLU-19 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
ResidualBlock-9 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
Conv2D-22 [[1, 256, 16, 16]] [1, 256, 16, 16] 590,080
BatchNorm2D-22 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
ReLU-20 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
Conv2D-23 [[1, 256, 16, 16]] [1, 256, 16, 16] 590,080
BatchNorm2D-23 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
ReLU-21 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
ResidualBlock-10 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
Conv2D-24 [[1, 256, 16, 16]] [1, 256, 16, 16] 590,080
BatchNorm2D-24 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
ReLU-22 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
Conv2D-25 [[1, 256, 16, 16]] [1, 256, 16, 16] 590,080
BatchNorm2D-25 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
ReLU-23 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
ResidualBlock-11 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
Conv2D-26 [[1, 256, 16, 16]] [1, 256, 16, 16] 590,080
BatchNorm2D-26 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
ReLU-24 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
Conv2D-27 [[1, 256, 16, 16]] [1, 256, 16, 16] 590,080
BatchNorm2D-27 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
ReLU-25 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
ResidualBlock-12 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
Conv2D-28 [[1, 256, 16, 16]] [1, 256, 16, 16] 590,080
BatchNorm2D-28 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
ReLU-26 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
Conv2D-29 [[1, 256, 16, 16]] [1, 256, 16, 16] 590,080
BatchNorm2D-29 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
ReLU-27 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
ResidualBlock-13 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
Conv2D-31 [[1, 256, 16, 16]] [1, 512, 8, 8] 1,180,160
BatchNorm2D-31 [[1, 512, 8, 8]] [1, 512, 8, 8] 2,048
ReLU-28 [[1, 512, 8, 8]] [1, 512, 8, 8] 0
Conv2D-32 [[1, 512, 8, 8]] [1, 512, 8, 8] 2,359,808
BatchNorm2D-32 [[1, 512, 8, 8]] [1, 512, 8, 8] 2,048
Conv2D-30 [[1, 256, 16, 16]] [1, 512, 8, 8] 131,584
BatchNorm2D-30 [[1, 512, 8, 8]] [1, 512, 8, 8] 2,048
ReLU-29 [[1, 512, 8, 8]] [1, 512, 8, 8] 0
ResidualBlock-14 [[1, 256, 16, 16]] [1, 512, 8, 8] 0
Conv2D-33 [[1, 512, 8, 8]] [1, 512, 8, 8] 2,359,808
BatchNorm2D-33 [[1, 512, 8, 8]] [1, 512, 8, 8] 2,048
ReLU-30 [[1, 512, 8, 8]] [1, 512, 8, 8] 0
Conv2D-34 [[1, 512, 8, 8]] [1, 512, 8, 8] 2,359,808
BatchNorm2D-34 [[1, 512, 8, 8]] [1, 512, 8, 8] 2,048
ReLU-31 [[1, 512, 8, 8]] [1, 512, 8, 8] 0
ResidualBlock-15 [[1, 512, 8, 8]] [1, 512, 8, 8] 0
Conv2D-35 [[1, 512, 8, 8]] [1, 512, 8, 8] 2,359,808
BatchNorm2D-35 [[1, 512, 8, 8]] [1, 512, 8, 8] 2,048
ReLU-32 [[1, 512, 8, 8]] [1, 512, 8, 8] 0
Conv2D-36 [[1, 512, 8, 8]] [1, 512, 8, 8] 2,359,808
BatchNorm2D-36 [[1, 512, 8, 8]] [1, 512, 8, 8] 2,048
ReLU-33 [[1, 512, 8, 8]] [1, 512, 8, 8] 0
ResidualBlock-16 [[1, 512, 8, 8]] [1, 512, 8, 8] 0
AvgPool2D-1 [[1, 512, 8, 8]] [1, 512, 2, 2] 0
Linear-1 [[1, 2048]] [1, 2] 4,098
============================================================================
Total params: 21,314,306
Trainable params: 21,280,258
Non-trainable params: 34,048
----------------------------------------------------------------------------
Input size (MB): 0.75
Forward/backward pass size (MB): 125.77
Params size (MB): 81.31
Estimated Total Size (MB): 207.82
----------------------------------------------------------------------------
{'total_params': 21314306, 'trainable_params': 21280258}
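With the summary confirming the expected shapes and parameter count, the two-class model could be trained through Paddle's high-level `paddle.Model` API. The sketch below is only a minimal example: `train_dataset`, the optimizer, learning rate, and epoch/batch settings are assumptions, not part of the original code.

    # Minimal training sketch; hyperparameters and train_dataset are placeholders.
    import paddle

    model = paddle.Model(ResNet(ResidualBlock, [3, 4, 6, 3], num_classes=2))
    model.prepare(
        optimizer=paddle.optimizer.Adam(learning_rate=1e-3, parameters=model.parameters()),
        loss=paddle.nn.CrossEntropyLoss(),
        metrics=paddle.metric.Accuracy())
    # train_dataset is assumed to be a paddle.io.Dataset yielding (image, label)
    # pairs with images shaped [3, 256, 256].
    model.fit(train_dataset, epochs=10, batch_size=32, verbose=1)

Note that the fixed `nn.Linear(2048, num_classes)` head ties the network to 256x256 inputs; other resolutions would change the pooled feature size.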
