LSTM Code
After repeated debugging and tweaking, this TensorFlow LSTM code finally runs: the model learned during training is saved to a checkpoint and loaded back directly at prediction time. Getting the official tutorial code to work took a long time; there are plenty of pitfalls to fill in, and the source code itself deserves a careful read. Along the way it also touches some of TensorFlow's more advanced machinery, such as queues and multithreading.
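The central trick in the code below is to register the final-state tensor in a named collection before saving, so the prediction run can restore the checkpoint and fetch that tensor again by name. Here is a minimal, self-contained sketch of just that mechanism; the variable names and the demo path are illustrative only, not part of the PTB code:

import tensorflow as tf

# Build a tiny graph and register the tensor we care about in a collection.
x = tf.placeholder(tf.float32, shape=[None, 3], name="x")
w = tf.get_variable("w", shape=[3, 4], dtype=tf.float32)
h = tf.matmul(x, w, name="hidden")      # stand-in for the LSTM final state
tf.add_to_collection("final_state", h)

saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.save(sess, "E:/LSTM/models/demo.ckpt")   # illustrative path

# Later (e.g. in the prediction script): rebuild the graph from the meta
# file, restore the weights, and pull the tensor back out by name.
tf.reset_default_graph()
saver = tf.train.import_meta_graph("E:/LSTM/models/demo.ckpt.meta")
with tf.Session() as sess:
    saver.restore(sess, "E:/LSTM/models/demo.ckpt")
    h_restored = tf.get_collection("final_state")[0]

The full code follows.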
import time
import numpy as np
import tensorflow as tf
import tensorflow.models.rnn.ptb.reader as reader

# flags = tf.flags
# logging = tf.logging
# flags.DEFINE_string("save_path", None,
# "Model output directory.")
# flags.DEFINE_bool("use_fp16", False,
# "Train using 16-bit floats instead of 32bit floats")
# FLAGS = flags.FLAGS
# def data_type():
# return tf.float16 if FLAGS.use_fp16 else tf.float32
class PTBInput(object):
    """The input data."""

    def __init__(self, config, data, name=None):
        self.batch_size = batch_size = config.batch_size
        self.num_steps = num_steps = config.num_steps
        # -1 because targets are the inputs shifted one step (matches reader.ptb_producer).
        self.epoch_size = ((len(data) // batch_size) - 1) // num_steps
        # x/y are queue-backed batch tensors whose dequeued values feed the placeholders below.
        self.x, self.y = reader.ptb_producer(data, batch_size, num_steps)
        self.input_data = tf.placeholder(shape=[batch_size, num_steps], dtype=tf.int32)
        self.targets = tf.placeholder(shape=[batch_size, num_steps], dtype=tf.int32)


class PTBModel(object):
    """The PTB model."""

    def __init__(self, is_training, config, input_):
        self._input = input_
        batch_size = input_.batch_size
        num_steps = input_.num_steps
        size = config.hidden_size
        vocab_size = config.vocab_size

        # Slightly better results can be obtained with forget gate biases
        # initialized to 1, but the hyperparameters of the model would need to be
        # different than reported in the paper.
        def lstm_cell():
            return tf.nn.rnn_cell.BasicLSTMCell(
                size, forget_bias=0.0, state_is_tuple=True)

        attn_cell = lstm_cell
        if is_training and config.keep_prob < 1:
            def attn_cell():
                return tf.nn.rnn_cell.DropoutWrapper(
                    lstm_cell(), output_keep_prob=config.keep_prob)
        cell = tf.nn.rnn_cell.MultiRNNCell(
            [attn_cell() for _ in range(config.num_layers)], state_is_tuple=True)
        self._initial_state = cell.zero_state(batch_size, tf.float32)
        print('initial_state:', self._initial_state)

        with tf.device("/cpu:0"):
            embedding = tf.get_variable(
                "embedding", [vocab_size, size], dtype=tf.float32)
            inputs = tf.nn.embedding_lookup(embedding, input_.input_data)

        if is_training and config.keep_prob < 1:
            inputs = tf.nn.dropout(inputs, config.keep_prob)
        # Simplified version of models/tutorials/rnn/rnn.py's rnn().
        # This builds an unrolled LSTM for tutorial purposes only.
        # In general, use the rnn() or state_saving_rnn() from rnn.py.
        #
        # The alternative version of the code below is:
        #
        # inputs = tf.unstack(inputs, num=num_steps, axis=1)
        # outputs, state = tf.nn.rnn(cell, inputs,
        #                            initial_state=self._initial_state)
        outputs = []
        state = self._initial_state
        with tf.variable_scope("RNN"):
            for time_step in range(num_steps):
                if time_step > 0: tf.get_variable_scope().reuse_variables()
                (cell_output, state) = cell(inputs[:, time_step, :], state)
                outputs.append(cell_output)

        output = tf.reshape(tf.concat(1, outputs), [-1, size])
        softmax_w = tf.get_variable(
            "softmax_w", [size, vocab_size], dtype=tf.float32)
        softmax_b = tf.get_variable("softmax_b", [vocab_size], dtype=tf.float32)
        logits = tf.matmul(output, softmax_w) + softmax_b
        loss = tf.nn.seq2seq.sequence_loss_by_example(
            [logits],
            [tf.reshape(input_.targets, [-1])],
            [tf.ones([batch_size * num_steps], dtype=tf.float32)])
        self._cost = cost = tf.reduce_sum(loss) / batch_size
        self._final_state = state
        # Export the final state so the prediction run can fetch it after restoring.
        tf.add_to_collection("final_state", self._final_state)
        print("state:", state)
        if not is_training:
            return

        self._lr = tf.Variable(0.0, trainable=False)
        tvars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars),
                                          config.max_grad_norm)
        optimizer = tf.train.GradientDescentOptimizer(self._lr)
        self._train_op = optimizer.apply_gradients(
            zip(grads, tvars),
            global_step=tf.contrib.framework.get_or_create_global_step())

        self._new_lr = tf.placeholder(
            tf.float32, shape=[], name="new_learning_rate")
        self._lr_update = tf.assign(self._lr, self._new_lr)
        self.saver = saver = tf.train.Saver()

    def assign_lr(self, session, lr_value):
        session.run(self._lr_update, feed_dict={self._new_lr: lr_value})

    @property
    def input(self):
        return self._input

    @property
    def initial_state(self):
        return self._initial_state

    @property
    def cost(self):
        return self._cost

    @property
    def final_state(self):
        return self._final_state

    @property
    def lr(self):
        return self._lr

    @property
    def train_op(self):
        return self._train_op


class SmallConfig(object):
"""Small config."""
init_scale = 0.1
learning_rate = 1.0
max_grad_norm = 5
num_layers = 2
num_steps = 20
hidden_size = 200
max_epoch = 4
max_max_epoch = 13
keep_prob = 1.0
lr_decay = 0.5
batch_size = 20
vocab_size = 10000 class MediumConfig(object):
"""Medium config."""
init_scale = 0.05
learning_rate = 1.0
max_grad_norm = 5
num_layers = 2
num_steps = 35
hidden_size = 650
max_epoch = 6
max_max_epoch = 39
keep_prob = 0.5
lr_decay = 0.8
batch_size = 20
vocab_size = 10000 class LargeConfig(object):
"""Large config."""
init_scale = 0.04
learning_rate = 1.0
max_grad_norm = 10
num_layers = 2
num_steps = 35
hidden_size = 1500
max_epoch = 14
max_max_epoch = 55
keep_prob = 0.35
lr_decay = 1 / 1.15
batch_size = 20
vocab_size = 10000 class TestConfig(object):
"""Tiny config, for testing."""
init_scale = 0.1
learning_rate = 1.0
max_grad_norm = 1
num_layers = 1
num_steps = 2
hidden_size = 2
max_epoch = 1
max_max_epoch = 1
keep_prob = 1.0
lr_decay = 0.5
batch_size = 20
vocab_size = 10000 def run_epoch(session, model,data,eval_op=None, verbose=False):
"""Runs the model on the given data."""
start_time = time.time()
costs = 0.0
iters = 0
state = session.run(model.initial_state)
fetches = {
"cost": model.cost,
"final_state": model.final_state,
}
if eval_op is not None:
fetches["eval_op"] = eval_op
for step in range(model.input.epoch_size):
feed_dict = {}
for i, (c, h) in enumerate(model.initial_state):
feed_dict[c] = state[i].c
feed_dict[h] = state[i].h x,y = session.run([model.input.x,model.input.y])
feed_dict[model.input.input_data] = x
feed_dict[model.input.targets] = y vals = session.run(fetches, feed_dict)
cost = vals["cost"]
state = vals["final_state"]
costs += cost
iters += model.input.num_steps
if verbose and step % (model.input.epoch_size // 10) == 10:
print("%.3f perplexity: %.3f speed: %.0f wps" %
(step * 1.0 / model.input.epoch_size, np.exp(costs / iters),
iters * model.input.batch_size / (time.time() - start_time)))
return np.exp(costs / iters) #获取句子的向量

# Gets a vector representation for the sentences: restores the trained model
# and collects the hidden state of the top LSTM layer.
def predict(session, model, data, verbose=False):
    result = []  # stores the vectors representing the sentences
    state = session.run(model.initial_state)
    saver = tf.train.import_meta_graph("E:/LSTM/models/model.ckpt.meta")
    saver.restore(session, "E:/LSTM/models/model.ckpt")
    final_state = tf.get_collection("final_state")[0]
    fetches = {"final_state": final_state}
    for step in range(model.input.epoch_size):
        feed_dict = {}
        for i, (c, h) in enumerate(model.initial_state):
            feed_dict[c] = state[i].c
            feed_dict[h] = state[i].h
        x, y = session.run([model.input.x, model.input.y])
        feed_dict[model.input.input_data] = x
        feed_dict[model.input.targets] = y
        vals = session.run(fetches, feed_dict)
        # Keep the hidden state h of the top layer as the sentence vector.
        result.append(vals["final_state"][-1].h)
        print(vals["final_state"][-1].h)
    return result


raw_data = reader.ptb_raw_data('E:/LSTM/simple-examples/data/')
train_data, valid_data, test_data, _ = raw_data

config = SmallConfig()
eval_config = SmallConfig()
eval_config.batch_size = 1
eval_config.num_steps = 1

with tf.Graph().as_default() as g:
    initializer = tf.random_uniform_initializer(-config.init_scale,
                                                config.init_scale)
    with g.name_scope("Train"):
        train_input = PTBInput(config=config, data=train_data, name="TrainInput")
        with tf.variable_scope("Model", reuse=None, initializer=initializer):
            m = PTBModel(is_training=True, config=config, input_=train_input)
        # tf.scalar_summary("Training Loss", m.cost)
        # tf.scalar_summary("Learning Rate", m.lr)

    with g.name_scope("Valid"):
        valid_input = PTBInput(config=config, data=valid_data, name="ValidInput")
        with tf.variable_scope("Model", reuse=True, initializer=initializer):
            mvalid = PTBModel(is_training=False, config=config, input_=valid_input)
        # tf.scalar_summary("Validation Loss", mvalid.cost)

    with g.name_scope("Test"):
        test_input = PTBInput(config=eval_config, data=test_data, name="TestInput")
        with tf.variable_scope("Model", reuse=True, initializer=initializer):
            mtest = PTBModel(is_training=False, config=eval_config,
                             input_=test_input)

    # The Supervisor's managed session starts the queue runners behind reader.ptb_producer.
    sv = tf.train.Supervisor()
    with sv.managed_session() as session:
        summary_writer = tf.train.SummaryWriter('E:/LSTM/lstm_logs', session.graph)
        for i in range(config.max_max_epoch):
            lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.0)
            m.assign_lr(session, config.learning_rate * lr_decay)
            print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
            train_perplexity = run_epoch(session, m, data=train_data, eval_op=m.train_op,
                                         verbose=True)
            print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
            valid_perplexity = run_epoch(session, mvalid, data=valid_data)
            print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))
        m.saver.save(session, "E:/LSTM/models/model.ckpt")
        sentences = predict(session, mtest, data=test_data)  # get the sentence vectors
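
One detail worth remembering about the queue/multithreading side mentioned above (this is general behaviour of TensorFlow's queue-based readers, not something specific to this code): reader.ptb_producer fills its batches from background threads, and those threads only run once the queue runners have been started. sv.managed_session() does that automatically, which is the main reason for using a Supervisor here. A rough sketch of the equivalent with a plain Session, inside the same graph scope and using the train_input built above, would look like this:

# Sketch: what sv.managed_session() handles for the queue-based input,
# shown with a plain Session (would replace the Supervisor block above).
init_op = tf.global_variables_initializer()
with tf.Session() as session:
    session.run(init_op)
    coord = tf.train.Coordinator()
    # Start the background threads feeding reader.ptb_producer's queues;
    # without this, session.run on train_input.x / train_input.y would hang.
    threads = tf.train.start_queue_runners(sess=session, coord=coord)
    try:
        x_batch, y_batch = session.run([train_input.x, train_input.y])
    finally:
        coord.request_stop()
        coord.join(threads)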