1. Building a simple network layer

 from __future__ import absolute_import, division, print_function
import tensorflow as tf
tf.keras.backend.clear_session()
import tensorflow.keras as keras
import tensorflow.keras.layers as layers
# Defining a layer means specifying its weights and the computation
# that maps inputs to outputs
class MyLayer(layers.Layer):
    def __init__(self, input_dim=32, unit=32):
        super(MyLayer, self).__init__()
        w_init = tf.random_normal_initializer()
        self.weight = tf.Variable(initial_value=w_init(
            shape=(input_dim, unit), dtype=tf.float32), trainable=True)
        b_init = tf.zeros_initializer()
        self.bias = tf.Variable(initial_value=b_init(
            shape=(unit,), dtype=tf.float32), trainable=True)

    def call(self, inputs):
        return tf.matmul(inputs, self.weight) + self.bias

x = tf.ones((3, 5))
my_layer = MyLayer(5, 4)
out = my_layer(x)
print(out)
 tf.Tensor(
[[0.06709253 0.06818779 0.09926171 0.0179923 ]
[0.06709253 0.06818779 0.09926171 0.0179923 ]
[0.06709253 0.06818779 0.09926171 0.0179923 ]], shape=(3, 4), dtype=float32)

With the layer built as above, it automatically tracks the weights w and b. We can also create the weights directly with the add_weight method:

class MyLayer(layers.Layer):
    def __init__(self, input_dim=32, unit=32):
        super(MyLayer, self).__init__()
        self.weight = self.add_weight(shape=(input_dim, unit),
                                      initializer=keras.initializers.RandomNormal(),
                                      trainable=True)
        self.bias = self.add_weight(shape=(unit,),
                                    initializer=keras.initializers.Zeros(),
                                    trainable=True)

    def call(self, inputs):
        return tf.matmul(inputs, self.weight) + self.bias

x = tf.ones((3, 5))
my_layer = MyLayer(5, 4)
out = my_layer(x)
print(out)
 tf.Tensor(
[[-0.10401802 -0.05459599 -0.08195674 0.13151655]
[-0.10401802 -0.05459599 -0.08195674 0.13151655]
[-0.10401802 -0.05459599 -0.08195674 0.13151655]], shape=(3, 4), dtype=float32)

You can also define non-trainable weights:

class AddLayer(layers.Layer):
    def __init__(self, input_dim=32):
        super(AddLayer, self).__init__()
        self.sum = self.add_weight(shape=(input_dim,),
                                   initializer=keras.initializers.Zeros(),
                                   trainable=False)

    def call(self, inputs):
        self.sum.assign_add(tf.reduce_sum(inputs, axis=0))
        return self.sum

x = tf.ones((3, 3))
my_layer = AddLayer(3)
out = my_layer(x)
print(out.numpy())
out = my_layer(x)
print(out.numpy())
print('weight:', my_layer.weights)
print('non-trainable weight:', my_layer.non_trainable_weights)
print('trainable weight:', my_layer.trainable_weights)
 [3. 3. 3.]
[6. 6. 6.]
weight: [<tf.Variable 'Variable:0' shape=(3,) dtype=float32, numpy=array([6., 6., 6.], dtype=float32)>]
non-trainable weight: [<tf.Variable 'Variable:0' shape=(3,) dtype=float32, numpy=array([6., 6., 6.], dtype=float32)>]
trainable weight: []

When the input dimension is unknown at definition time, you can override the build() method and create the weights from the input shape it receives:

class MyLayer(layers.Layer):
    def __init__(self, unit=32):
        super(MyLayer, self).__init__()
        self.unit = unit

    def build(self, input_shape):
        self.weight = self.add_weight(shape=(input_shape[-1], self.unit),
                                      initializer=keras.initializers.RandomNormal(),
                                      trainable=True)
        self.bias = self.add_weight(shape=(self.unit,),
                                    initializer=keras.initializers.Zeros(),
                                    trainable=True)

    def call(self, inputs):
        return tf.matmul(inputs, self.weight) + self.bias

my_layer = MyLayer(3)
x = tf.ones((3, 5))
out = my_layer(x)
print(out)

my_layer = MyLayer(3)  # a fresh instance, since build() runs only once per layer
x = tf.ones((2, 2))
out = my_layer(x)
print(out)
 tf.Tensor(
[[ 0.00949192 -0.02009935 -0.11726624]
[ 0.00949192 -0.02009935 -0.11726624]
[ 0.00949192 -0.02009935 -0.11726624]], shape=(3, 3), dtype=float32)
tf.Tensor(
[[-0.00516411 -0.04891593 -0.0181773 ]
[-0.00516411 -0.04891593 -0.0181773 ]], shape=(2, 3), dtype=float32)

2. Building networks recursively from sublayers

class MyBlock(layers.Layer):
    def __init__(self):
        super(MyBlock, self).__init__()
        self.layer1 = MyLayer(32)
        self.layer2 = MyLayer(16)
        self.layer3 = MyLayer(2)

    def call(self, inputs):
        h1 = self.layer1(inputs)
        h1 = tf.nn.relu(h1)
        h2 = self.layer2(h1)
        h2 = tf.nn.relu(h2)
        return self.layer3(h2)

my_block = MyBlock()
print('trainable weights:', len(my_block.trainable_weights))
y = my_block(tf.ones(shape=(3, 64)))
# the weights are created in build(), so they exist only after the block is called
print('trainable weights:', len(my_block.trainable_weights))
 trainable weights: 0
trainable weights: 6

A layer can also collect losses during the forward pass via add_loss:

class LossLayer(layers.Layer):
    def __init__(self, rate=1e-2):
        super(LossLayer, self).__init__()
        self.rate = rate

    def call(self, inputs):
        self.add_loss(self.rate * tf.reduce_sum(inputs))
        return inputs

class OutLayer(layers.Layer):
    def __init__(self):
        super(OutLayer, self).__init__()
        self.loss_fun = LossLayer(1e-2)

    def call(self, inputs):
        return self.loss_fun(inputs)

my_layer = OutLayer()
print(len(my_layer.losses))  # not called yet
y = my_layer(tf.zeros((1, 1)))
print(len(my_layer.losses))  # after one call
y = my_layer(tf.zeros((1, 1)))
print(len(my_layer.losses))  # losses are reset at the start of each __call__
 0
1
1

If a built-in Keras layer is called inside, its regularization losses are collected as well:

class OuterLayer(layers.Layer):
    def __init__(self):
        super(OuterLayer, self).__init__()
        self.dense = layers.Dense(32, kernel_regularizer=tf.keras.regularizers.l2(1e-3))

    def call(self, inputs):
        return self.dense(inputs)

my_layer = OuterLayer()
y = my_layer(tf.zeros((1, 1)))
print(my_layer.losses)
print(my_layer.weights)
 [<tf.Tensor: id=413, shape=(), dtype=float32, numpy=0.0018067828>]
[<tf.Variable 'outer_layer_1/dense_1/kernel:0' shape=(1, 32) dtype=float32, numpy=
array([[-0.11054656, 0.34735924, -0.22560999, 0.38415992, 0.13070339,
0.15960163, 0.20130599, 0.40365922, -0.09471637, -0.02402192,
0.16438413, 0.2716753 , 0.0594548 , -0.06913272, -0.40491152,
0.00894281, 0.3199494 , 0.0228827 , -0.18515846, 0.32210535,
0.41672045, 0.1942389 , -0.4254937 , 0.07178113, 0.00740242,
0.23780417, -0.24449413, -0.15526545, -0.2200018 , -0.2426699 ,
-0.17750363, -0.16994882]], dtype=float32)>, <tf.Variable 'outer_layer_1/dense_1/bias:0' shape=(32,) dtype=float32, numpy=
array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
dtype=float32)>]
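These collected losses are meant to be folded into the objective during training. A minimal sketch of one training step follows; the SGD optimizer and the stand-in task loss are illustrative choices, not part of the layer above:

model = OuterLayer()
optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
x_batch = tf.ones((2, 1))

with tf.GradientTape() as tape:
    out = model(x_batch)
    task_loss = tf.reduce_mean(tf.square(out))  # stand-in objective
    loss = task_loss + sum(model.losses)        # fold in losses collected via add_loss
grads = tape.gradient(loss, model.trainable_weights)
optimizer.apply_gradients(zip(grads, model.trainable_weights))

Note that model.losses is repopulated on each forward pass, so summing it inside the tape lets gradients flow through the regularization term.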

3. Other layer configuration

To make your own layer serializable, implement get_config():

class Linear(layers.Layer):
    def __init__(self, units=32, **kwargs):
        super(Linear, self).__init__(**kwargs)
        self.units = units

    def build(self, input_shape):
        self.w = self.add_weight(shape=(input_shape[-1], self.units),
                                 initializer='random_normal',
                                 trainable=True)
        self.b = self.add_weight(shape=(self.units,),
                                 initializer='random_normal',
                                 trainable=True)

    def call(self, inputs):
        return tf.matmul(inputs, self.w) + self.b

    def get_config(self):
        config = super(Linear, self).get_config()
        config.update({'units': self.units})
        return config

layer = Linear(125)
config = layer.get_config()
print(config)
new_layer = Linear.from_config(config)
 {'name': 'linear_1', 'trainable': True, 'dtype': None, 'units': 125}
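get_config/from_config also lets Keras reload saved models that contain the layer, provided the class is made known at load time. A sketch, assuming some Keras model using Linear was previously saved to the illustrative path 'my_model.h5':

# Assumption: a model containing Linear was saved with model.save('my_model.h5')
loaded = tf.keras.models.load_model('my_model.h5',
                                    custom_objects={'Linear': Linear})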

Defining a layer whose behavior applies only during training:

class MyDropout(layers.Layer):
    def __init__(self, rate, **kwargs):
        super(MyDropout, self).__init__(**kwargs)
        self.rate = rate

    def call(self, inputs, training=None):
        # tf.cond needs a boolean, so treat the default None as inference mode
        if training is None:
            training = False
        return tf.cond(tf.cast(training, tf.bool),
                       lambda: tf.nn.dropout(inputs, rate=self.rate),
                       lambda: inputs)
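A quick check of the training switch (the input shape and rate are illustrative):

drop = MyDropout(0.5)
x = tf.ones((2, 4))
print(drop(x, training=False))  # identity: inputs pass through unchanged
print(drop(x, training=True))   # zeroes elements at random, scales the rest by 1/(1-rate)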

4. Building your own model
Typically, we use the Layer class to define the inner computation blocks and the Model class to define the outer model, that is, the object that will be trained.

The Model class differs from Layer in that:

It exposes built-in training, evaluation, and prediction loops (model.fit(), model.evaluate(), model.predict()).
It exposes the list of its inner layers via the model.layers attribute.
It exposes saving and serialization APIs.
Below we build a variational autoencoder (VAE) to show how to construct your own model.

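A minimal VAE sketch in the spirit of the official TF2 custom-layer guide; the Sampling/Encoder/Decoder class names and the layer sizes here are illustrative choices:

class Sampling(layers.Layer):
    """Uses (z_mean, z_log_var) to sample a latent vector z."""
    def call(self, inputs):
        z_mean, z_log_var = inputs
        batch = tf.shape(z_mean)[0]
        dim = tf.shape(z_mean)[1]
        epsilon = tf.random.normal(shape=(batch, dim))
        return z_mean + tf.exp(0.5 * z_log_var) * epsilon

class Encoder(layers.Layer):
    """Maps inputs to (z_mean, z_log_var, z)."""
    def __init__(self, latent_dim=32, intermediate_dim=64, **kwargs):
        super(Encoder, self).__init__(**kwargs)
        self.dense_proj = layers.Dense(intermediate_dim, activation='relu')
        self.dense_mean = layers.Dense(latent_dim)
        self.dense_log_var = layers.Dense(latent_dim)
        self.sampling = Sampling()

    def call(self, inputs):
        h = self.dense_proj(inputs)
        z_mean = self.dense_mean(h)
        z_log_var = self.dense_log_var(h)
        z = self.sampling((z_mean, z_log_var))
        return z_mean, z_log_var, z

class Decoder(layers.Layer):
    """Maps the latent vector z back to the input space."""
    def __init__(self, original_dim, intermediate_dim=64, **kwargs):
        super(Decoder, self).__init__(**kwargs)
        self.dense_proj = layers.Dense(intermediate_dim, activation='relu')
        self.dense_output = layers.Dense(original_dim, activation='sigmoid')

    def call(self, inputs):
        return self.dense_output(self.dense_proj(inputs))

class VariationalAutoEncoder(tf.keras.Model):
    """Chains encoder and decoder; the KL term is collected via add_loss."""
    def __init__(self, original_dim, intermediate_dim=64, latent_dim=32, **kwargs):
        super(VariationalAutoEncoder, self).__init__(**kwargs)
        self.original_dim = original_dim
        self.encoder = Encoder(latent_dim=latent_dim,
                               intermediate_dim=intermediate_dim)
        self.decoder = Decoder(original_dim,
                               intermediate_dim=intermediate_dim)

    def call(self, inputs):
        z_mean, z_log_var, z = self.encoder(inputs)
        reconstructed = self.decoder(z)
        # KL divergence regularization loss
        kl_loss = -0.5 * tf.reduce_mean(
            z_log_var - tf.square(z_mean) - tf.exp(z_log_var) + 1)
        self.add_loss(kl_loss)
        return reconstructed

Because this is a Model, the built-in loops work directly; the MNIST preprocessing below is the usual flattening to 784 floats:

(x_train, _), _ = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(60000, 784).astype('float32') / 255

vae = VariationalAutoEncoder(784, 64, 32)
vae.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),
            loss=tf.keras.losses.MeanSquaredError())
vae.fit(x_train, x_train, epochs=3, batch_size=64)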

Writing your own training loop

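A minimal sketch of a hand-written loop with tf.GradientTape, reusing the vae model and x_train data from above; the batch size, epoch count, and optimizer settings are illustrative:

optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
mse_loss_fn = tf.keras.losses.MeanSquaredError()
loss_metric = tf.keras.metrics.Mean()

train_dataset = tf.data.Dataset.from_tensor_slices(x_train).shuffle(1024).batch(64)

for epoch in range(3):
    for step, x_batch in enumerate(train_dataset):
        with tf.GradientTape() as tape:
            reconstructed = vae(x_batch)
            loss = mse_loss_fn(x_batch, reconstructed)
            loss += sum(vae.losses)  # include the KL term collected via add_loss
        grads = tape.gradient(loss, vae.trainable_variables)
        optimizer.apply_gradients(zip(grads, vae.trainable_variables))
        loss_metric(loss)
    print('epoch %d: mean loss = %.4f' % (epoch, loss_metric.result()))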
