2.4 scope (name_scope/variable_scope)
from __future__ import print_function
import tensorflow as tf

with tf.name_scope("a_name_scope"):
    initializer = tf.constant_initializer(value=1)
    var1 = tf.get_variable(name='var1', shape=[1], dtype=tf.float32, initializer=initializer)
    var2 = tf.Variable(name='var2', initial_value=[2], dtype=tf.float32)
    var21 = tf.Variable(name='var2', initial_value=[2.1], dtype=tf.float32)
    var22 = tf.Variable(name='var2', initial_value=[2.2], dtype=tf.float32)

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    print(var1.name)        # var1:0 -- name_scope has no effect on tf.get_variable
    print(sess.run(var1))   # [ 1.]
    print(var2.name)        # a_name_scope/var2:0
    print(sess.run(var2))   # [ 2.]
    print(var21.name)       # a_name_scope/var2_1:0
    print(sess.run(var21))  # [ 2.0999999]
    print(var22.name)       # a_name_scope/var2_2:0
    print(sess.run(var22))  # [ 2.20000005]

with tf.variable_scope("a_variable_scope") as scope:
    initializer = tf.constant_initializer(value=3)
    var3 = tf.get_variable(name='var3', shape=[1], dtype=tf.float32, initializer=initializer)
    var4 = tf.Variable(name='var4', initial_value=[4], dtype=tf.float32)
    var4_reuse = tf.Variable(name='var4', initial_value=[4], dtype=tf.float32)
    scope.reuse_variables()  # mark the variables in this scope as reusable
    var3_reuse = tf.get_variable(name='var3')

with tf.Session() as sess:
    # tf.initialize_all_variables() is no longer valid from
    # 2017-03-02 if using tensorflow >= 0.12
    if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1:
        init = tf.initialize_all_variables()
    else:
        init = tf.global_variables_initializer()
    sess.run(init)
    print(var3.name)             # a_variable_scope/var3:0
    print(sess.run(var3))        # [ 3.]
    print(var4.name)             # a_variable_scope/var4:0
    print(sess.run(var4))        # [ 4.]
    print(var4_reuse.name)       # a_variable_scope/var4_1:0 -- tf.Variable always creates a new variable
    print(sess.run(var4_reuse))  # [ 4.]
    print(var3_reuse.name)       # a_variable_scope/var3:0 -- the same variable as var3
    print(sess.run(var3_reuse))  # [ 3.]
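A detail worth noting in the example above: without scope.reuse_variables(), asking tf.get_variable for an existing name raises a ValueError instead of silently creating var3_1 the way tf.Variable does. A minimal sketch of both behaviors, assuming TF 1.x (the scope name another_scope is made up for illustration):

with tf.variable_scope("another_scope"):
    v = tf.get_variable(name='v', shape=[1], initializer=tf.constant_initializer(0.))
    # v2 = tf.get_variable(name='v', shape=[1])  # ValueError: Variable another_scope/v already exists

# passing reuse=True is an alternative to calling scope.reuse_variables()
with tf.variable_scope("another_scope", reuse=True):
    v_reuse = tf.get_variable(name='v')

print(v.name, v_reuse.name)  # another_scope/v:0 another_scope/v:0 -- one underlying variable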
RNNs typically involve a repeated loop structure, and the graphs built for training and for testing can differ (for example, in the number of time steps). When the two should nevertheless share the same parameters, scope.reuse_variables() is the tool to use, as the full example below demonstrates.
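Before the full RNN example, here is a minimal sketch of the same sharing pattern with a single dense layer; build_net and its shapes are hypothetical, not part of the tutorial code:

def build_net(x):
    # tf.get_variable resolves names against the enclosing variable_scope,
    # so both calls below look up 'model/W' and 'model/b'
    W = tf.get_variable('W', shape=[10, 2], initializer=tf.random_normal_initializer())
    b = tf.get_variable('b', shape=[2], initializer=tf.constant_initializer(0.1))
    return tf.matmul(x, W) + b

x_train = tf.placeholder(tf.float32, [None, 10])
x_test = tf.placeholder(tf.float32, [None, 10])

with tf.variable_scope('model') as scope:
    train_out = build_net(x_train)   # creates model/W and model/b
    scope.reuse_variables()
    test_out = build_net(x_test)     # reuses model/W and model/b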
# visit https://morvanzhou.github.io/tutorials/ for more!

# 22 scope (name_scope/variable_scope)
from __future__ import print_function
import tensorflow as tf


class TrainConfig:
    batch_size = 20
    time_steps = 20
    input_size = 10
    output_size = 2
    cell_size = 11
    learning_rate = 0.01


class TestConfig(TrainConfig):
    time_steps = 1


class RNN(object):

    def __init__(self, config):
        self._batch_size = config.batch_size
        self._time_steps = config.time_steps
        self._input_size = config.input_size
        self._output_size = config.output_size
        self._cell_size = config.cell_size
        self._lr = config.learning_rate
        self._built_RNN()

    def _built_RNN(self):
        with tf.variable_scope('inputs'):
            self._xs = tf.placeholder(tf.float32, [self._batch_size, self._time_steps, self._input_size], name='xs')
            self._ys = tf.placeholder(tf.float32, [self._batch_size, self._time_steps, self._output_size], name='ys')
        with tf.name_scope('RNN'):
            with tf.variable_scope('input_layer'):
                l_in_x = tf.reshape(self._xs, [-1, self._input_size], name='2_2D')  # (batch*n_step, in_size)
                # Ws (in_size, cell_size)
                Wi = self._weight_variable([self._input_size, self._cell_size])
                print(Wi.name)
                # bs (cell_size, )
                bi = self._bias_variable([self._cell_size, ])
                # l_in_y = (batch * n_steps, cell_size)
                with tf.name_scope('Wx_plus_b'):
                    l_in_y = tf.matmul(l_in_x, Wi) + bi
                l_in_y = tf.reshape(l_in_y, [-1, self._time_steps, self._cell_size], name='2_3D')

            with tf.variable_scope('cell'):
                cell = tf.contrib.rnn.BasicLSTMCell(self._cell_size)
                with tf.name_scope('initial_state'):
                    self._cell_initial_state = cell.zero_state(self._batch_size, dtype=tf.float32)

                self.cell_outputs = []
                cell_state = self._cell_initial_state
                for t in range(self._time_steps):
                    if t > 0: tf.get_variable_scope().reuse_variables()
                    cell_output, cell_state = cell(l_in_y[:, t, :], cell_state)
                    self.cell_outputs.append(cell_output)
                self._cell_final_state = cell_state

            with tf.variable_scope('output_layer'):
                # cell_outputs_reshaped (BATCH*TIME_STEP, CELL_SIZE)
                # tf.concat takes (values, axis) in TF >= 1.0
                cell_outputs_reshaped = tf.reshape(tf.concat(self.cell_outputs, 1), [-1, self._cell_size])
                Wo = self._weight_variable((self._cell_size, self._output_size))
                bo = self._bias_variable((self._output_size,))
                product = tf.matmul(cell_outputs_reshaped, Wo) + bo
                # _pred shape (batch*time_step, output_size)
                self._pred = tf.nn.relu(product)  # for displacement

        with tf.name_scope('cost'):
            _pred = tf.reshape(self._pred, [self._batch_size, self._time_steps, self._output_size])
            mse = self.ms_error(_pred, self._ys)
            mse_ave_across_batch = tf.reduce_mean(mse, 0)
            mse_sum_across_time = tf.reduce_sum(mse_ave_across_batch, 0)
            self._cost = mse_sum_across_time
            self._cost_ave_time = self._cost / self._time_steps

        with tf.name_scope('train'):
            self._lr = tf.convert_to_tensor(self._lr)
            self.train_op = tf.train.AdamOptimizer(self._lr).minimize(self._cost)

    @staticmethod
    def ms_error(y_pre, y_target):
        # tf.sub was renamed to tf.subtract in TF >= 1.0
        return tf.square(tf.subtract(y_pre, y_target))

    @staticmethod
    def _weight_variable(shape, name='weights'):
        initializer = tf.random_normal_initializer(mean=0., stddev=0.5)
        return tf.get_variable(shape=shape, initializer=initializer, name=name)

    @staticmethod
    def _bias_variable(shape, name='biases'):
        initializer = tf.constant_initializer(0.1)
        return tf.get_variable(name=name, shape=shape, initializer=initializer)


if __name__ == '__main__':
    train_config = TrainConfig()
    test_config = TestConfig()

    # the wrong method to reuse parameters in train rnn:
    # the parameters are meant to be identical between train and test,
    # but two different scopes create two independent sets of variables
    with tf.variable_scope('train_rnn'):
        train_rnn1 = RNN(train_config)
    with tf.variable_scope('test_rnn'):
        test_rnn1 = RNN(test_config)

    # the right method to reuse parameters in train rnn
    with tf.variable_scope('rnn') as scope:
        sess = tf.Session()
        train_rnn2 = RNN(train_config)
        scope.reuse_variables()
        test_rnn2 = RNN(test_config)

    # tf.initialize_all_variables() is no longer valid from
    # 2017-03-02 if using tensorflow >= 0.12
    if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1:
        init = tf.initialize_all_variables()
    else:
        init = tf.global_variables_initializer()
    sess.run(init)
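To confirm that the second approach really shares parameters, list the trainable variables after building all four networks. A quick check, assuming the script above has just run (exact variable names vary with the TF version):

    for v in tf.trainable_variables():
        print(v.name)
    # the wrong method yields two full sets, e.g. train_rnn/input_layer/weights:0
    # and test_rnn/input_layer/weights:0, while the right method yields a single
    # shared set under rnn/, e.g. rnn/input_layer/weights:0 -- note that
    # name_scope('RNN') does not appear, since name_scope is ignored by get_variable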