Building an RNN network with TensorFlow
A simple RNN implemented with TensorFlow that learns to do addition.
TensorFlow version: 1.1
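The idea is to treat addition as a sequence problem: each addend is encoded as a fixed-length binary vector (least significant bit first), the LSTM reads one bit pair per timestep, and the target is the bit vector of the sum. Below is a minimal sketch of that encoding; the encode helper is hypothetical and only for illustration, and the fixed length of 15 assumes max_integer = 10000 as in the DataProcess class further down.

import numpy as np

def encode(n, max_len=15):
    bits = [float(b) for b in bin(n)[2:]]        # binary digits, most significant bit first
    bits = [0.0] * (max_len - len(bits)) + bits  # left-pad to the fixed length
    return bits[::-1]                            # reverse: least significant bit first

a, b = 3, 5
x = np.stack([encode(a), encode(b)], axis=1)     # shape (15, 2): one bit pair per timestep
y = encode(a + b)                                # target bits of the sum (8)
print(x[:4])                                     # [[1. 1.] [1. 0.] [0. 1.] [0. 0.]]
print(y[:4])                                     # [0.0, 0.0, 0.0, 1.0]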
import tensorflow as tf
from tensorflow.contrib import rnn


class RNN():
    def __init__(self, input_dim, hidden_dim, step_num, class_num, learning_rate):
        # tf Graph input
        self.x = tf.placeholder("float", [None, step_num, input_dim])
        self.y = tf.placeholder("float", [None, class_num])
        # Define weights
        weights = {
            'out': tf.Variable(tf.random_normal([hidden_dim, hidden_dim])),
            'sigout': tf.Variable(tf.random_normal([hidden_dim, class_num]))
        }
        biases = {
            'out': tf.Variable(tf.random_normal([hidden_dim])),
            'sigout': tf.Variable(tf.random_normal([class_num]))
        }
        # Unstack to get a list of 'step_num' tensors of shape (batch_size, input_dim)
        x_unstack = tf.unstack(self.x, step_num, 1)
        # Define an LSTM cell with TensorFlow
        lstm_cell = rnn.BasicLSTMCell(hidden_dim, forget_bias=1.0)
        # Get LSTM cell outputs
        outputs, states = rnn.static_rnn(lstm_cell, x_unstack, dtype=tf.float32)
        # Linear activation on the last output of the RNN's inner loop
        pred = tf.matmul(outputs[-1], weights['out']) + biases['out']
        # Sigmoid hidden layer, then a linear projection to class_num output bits
        sigmodout = tf.matmul(tf.nn.sigmoid(pred), weights['sigout']) + biases['sigout']
        # Round to 0/1 to get the predicted bits of the sum
        self.predict = tf.round(sigmodout)
        correct_pred = tf.equal(self.predict, self.y)
        self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
        # L1 distance between the raw outputs and the target bits
        cost = tf.reduce_sum(tf.abs(tf.subtract(sigmodout, self.y)))
        self.loss = cost
        tf.summary.scalar('acc', self.accuracy)
        tf.summary.scalar('cost', cost)
        self.merge_all = tf.summary.merge_all()
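A quick way to sanity-check the graph is to build it with the same dimensions the training script further down uses and print the tensor shapes. This is only a sketch; it assumes the class above is saved as src/bitplus/rnn.py, the path the training script imports from.

import tensorflow as tf
from src.bitplus.rnn import RNN

tf.reset_default_graph()
net = RNN(input_dim=2, hidden_dim=30, step_num=15, class_num=15, learning_rate=0.01)
print(net.x.shape)        # (?, 15, 2) -- one 2-bit input per timestep
print(net.predict.shape)  # (?, 15)    -- rounded output bits of the sum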
import random
import numpy as np


class DataProcess():
    def __init__(self):
        self.point = 0
        self.max_integer = 10000
        # Sequence length: number of bits in max_integer plus one bit for the carry
        self.max_len = len(self.filter(bin(self.max_integer))) + 1

    def nextBatch(self, batch_size, fake=False, is_test=False):
        x_batch = []
        y_batch = []
        t_batch = []
        for i in range(0, batch_size):
            a = random.randint(0, self.max_integer)
            b = random.randint(0, self.max_integer)
            if fake:
                a = 1
                b = 1
            c = a + b
            abin = self.process(a)
            bbin = self.process(b)
            cbin = self.process(c)
            xa = np.array(abin)
            xb = np.array(bbin)
            x_ = np.concatenate((xa, xb), axis=0)
            x_batch.append(x_.reshape(2, self.max_len))
            y_batch.append(cbin)
            if is_test:
                # Keep the raw operands so predictions can be checked later
                t_batch.append([a, b])
        return np.array(x_batch), np.array(y_batch), np.array(t_batch)

    def process(self, num):
        # Fixed-length binary representation, least significant bit first
        bstr = bin(num)
        bstr = self.filter(bstr)
        bstr = self.completion(bstr)
        return bstr[::-1]

    def filter(self, bstr):
        return bstr.replace('0b', '')

    def completion(self, bstr):
        # Left-pad with zeros up to max_len and convert every digit to a float
        lst = []
        for num in list(bstr):
            lst.append(int(num))
        length = len(bstr)
        for i in range(length, self.max_len):
            lst.insert(0, 0)
        return [float(i) for i in lst]
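Before wiring this into the network, it helps to check what a batch looks like. A quick sanity check using the class above (with max_integer = 10000 the sequence length works out to 15):

dp = DataProcess()
x, y, _ = dp.nextBatch(4, fake=True)   # fake=True forces a = b = 1, handy for debugging
print(dp.max_len)                      # 15
print(x.shape)                         # (4, 2, 15): two addends, each as 15 reversed bits
print(y.shape)                         # (4, 15): bits of the sum
# The training script transposes x to (batch, 15, 2) so the LSTM sees one bit pair per step.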
import tensorflow as tf
import time
import os
from src.bitplus.Process import DataProcess
from src.bitplus.rnn import RNN

dp = DataProcess()

tf.app.flags.DEFINE_float("learning_rate", 0.01, 'learning rate')
tf.app.flags.DEFINE_integer("training_iters", 500000, '')
tf.app.flags.DEFINE_integer("batch_size", 100, '')
tf.app.flags.DEFINE_integer("display_step", 10, '')
tf.app.flags.DEFINE_integer("input_dim", 2, '')
tf.app.flags.DEFINE_integer("steps_num", dp.max_len, '')
tf.app.flags.DEFINE_integer("hidden_dim", dp.max_len * 2, '')
tf.app.flags.DEFINE_integer("class_num", dp.max_len, '')
tf.app.flags.DEFINE_string("rnn_model", 'rnn.model', '')
FLAGS = tf.app.flags.FLAGS

logfolder = time.strftime("%Y%m%d_%H%M%S", time.localtime())


def train():
    rnn = RNN(FLAGS.input_dim, FLAGS.hidden_dim, FLAGS.steps_num, FLAGS.class_num, FLAGS.learning_rate)
    optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate).minimize(rnn.loss)
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)
    train_writer = tf.summary.FileWriter('../LOGS/' + logfolder, sess.graph)
    step = 1
    while step * FLAGS.batch_size < FLAGS.training_iters:
        batch_x, batch_y, _ = dp.nextBatch(FLAGS.batch_size, False)
        # (batch, 2, max_len) -> (batch, max_len, 2): one bit pair per timestep
        batch_x = batch_x.transpose((0, 2, 1))
        # Run optimization op (backprop)
        sess.run(optimizer, feed_dict={rnn.x: batch_x, rnn.y: batch_y})
        if step % FLAGS.display_step == 0:
            # Calculate batch loss and accuracy
            merge_all, loss, acc, predict = sess.run(
                [rnn.merge_all, rnn.loss, rnn.accuracy, rnn.predict],
                feed_dict={rnn.x: batch_x, rnn.y: batch_y})
            print("Iter " + str(step * FLAGS.batch_size) + ", Minibatch Loss= " +
                  "{:.6f}".format(loss) + ", Training Accuracy= " +
                  "{:.5f}".format(acc))
            train_writer.add_summary(merge_all, step)
        step += 1
    print("Optimization Finished!")
    train_writer.close()
    saver = tf.train.Saver(tf.global_variables())
    if not os.path.exists('model'):
        os.mkdir('model')
    saver.save(sess, './model/' + FLAGS.rnn_model)


def tModel(model):
    rnn = RNN(FLAGS.input_dim, FLAGS.hidden_dim, FLAGS.steps_num, FLAGS.class_num, FLAGS.learning_rate)
    saver = tf.train.Saver()
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)
    saver.restore(sess, model)
    batch_x, batch_y, batch_test = dp.nextBatch(100, False, True)
    batch_x = batch_x.transpose((0, 2, 1))
    predict = sess.run([rnn.predict], feed_dict={rnn.x: batch_x, rnn.y: batch_y})
    for ndx in range(len(predict[0])):
        print('%s + %s = %d(%d)' % (batch_test[ndx][0], batch_test[ndx][1],
                                    batch_test[ndx][0] + batch_test[ndx][1],
                                    bin2Ten(predict[0][ndx])))
    print('pause!')


def bin2Ten(bits):
    # The network outputs bits with the least significant bit first; reverse before parsing
    lst = []
    for i in bits[::-1]:
        lst.append(str(int(i)))
    return int(''.join(lst), base=2)


def main(_):
    print('start...')
    print('max len:', dp.max_len)
    # train()
    tModel('./model/' + FLAGS.rnn_model)


if __name__ == '__main__':
    tf.app.run()
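As written, main() only restores and evaluates a previously saved model, so ./model/rnn.model must already exist; for the first run, uncomment train(). One possible way to run both phases in a single invocation is sketched below (an assumption on my part, not part of the original script): the default graph has to be reset between the two calls, otherwise the Saver in tModel would see two copies of every variable and fail to restore.

def main(_):
    print('start...')
    print('max len:', dp.max_len)
    train()                    # builds its own graph and saves ./model/rnn.model
    tf.reset_default_graph()   # start a fresh graph before rebuilding the RNN for evaluation
    tModel('./model/' + FLAGS.rnn_model)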