'''
A Bidirectional Recurrent Neural Network (LSTM) implementation example using the TensorFlow library.
This example uses the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/).
Long Short-Term Memory paper: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''

from __future__ import print_function

import tensorflow as tf
from tensorflow.contrib import rnn
import numpy as np

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

'''
To classify images using a bidirectional recurrent neural network, we consider
every image row as a sequence of pixels. Because MNIST image shape is 28*28px,
we will then handle 28 sequences of 28 steps for every sample.
'''

# Parameters
learning_rate = 0.001
training_iters = 100000  # roughly the total number of training samples to consume
batch_size = 128         # number of samples per training step
display_step = 10        # print progress every `display_step` steps

# Network Parameters
# n_steps * n_input is just the image itself: each of its rows goes to one time step.
n_input = 28    # MNIST data input (img shape: 28*28)
n_steps = 28    # timesteps
n_hidden = 128  # hidden layer num of features (hidden layer size)
n_classes = 10  # MNIST total classes (0-9 digits)

# tf Graph input
# In [None, n_steps, n_input], None means this dimension (the batch) has no fixed size.
x = tf.placeholder("float", [None, n_steps, n_input])
y = tf.placeholder("float", [None, n_classes])

# Define weights
weights = {
    # Hidden layer weights => 2*n_hidden because of forward + backward cells
    'out': tf.Variable(tf.random_normal([2*n_hidden, n_classes]))
}
biases = {
    'out': tf.Variable(tf.random_normal([n_classes]))
}


def BiRNN(x, weights, biases):
    # Prepare data shape to match the `bidirectional_rnn` function requirements.
    # Current data input shape: (batch_size, n_steps, n_input)
    # Required shape: a list of 'n_steps' tensors of shape (batch_size, n_input)

    # Unstack to get a list of 'n_steps' tensors of shape (batch_size, n_input),
    # i.e. (batch_size, n_steps, n_input) becomes n_steps * (batch_size, n_input).
    x = tf.unstack(x, n_steps, 1)

    # Define lstm cells with tensorflow
    # Forward direction cell
    lstm_fw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    # Backward direction cell
    lstm_bw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)

    # Get lstm cell output
    try:
        outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                                     dtype=tf.float32)
    except Exception:  # Old TensorFlow versions only return outputs, not states
        outputs = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                               dtype=tf.float32)

    # Linear activation, using the rnn inner loop's last output
    return tf.matmul(outputs[-1], weights['out']) + biases['out']


pred = BiRNN(x, weights, biases)

# Define loss and optimizer
# softmax_cross_entropy_with_logits measures the probability error in discrete
# classification tasks where the classes are mutually exclusive. It returns a
# 1-D tensor of length batch_size, of the same type as logits, holding the
# softmax cross-entropy loss per example.
# reduce_mean then averages over all values (no axis is specified here).
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
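# For example, tf.reduce_mean applied to per-example losses [0.2, 0.4, 0.6]
# (a hypothetical batch of 3) yields the scalar 0.4.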
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initializing the variables
init = tf.global_variables_initializer()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    step = 1
    # Keep training until reaching the max number of iterations
    while step * batch_size < training_iters:
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Reshape data to get 28 sequences of 28 elements
        batch_x = batch_x.reshape((batch_size, n_steps, n_input))
        # Run optimization op (backprop)
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
        if step % display_step == 0:
            # Calculate batch accuracy
            acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
            # Calculate batch loss
            loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
            print("Iter " + str(step*batch_size) + ", Minibatch Loss= " +
                  "{:.6f}".format(loss) + ", Training Accuracy= " +
                  "{:.5f}".format(acc))
        step += 1
    print("Optimization Finished!")

    # Calculate accuracy for 128 mnist test images
    test_len = 128
    test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input))
    test_label = mnist.test.labels[:test_len]
    print("Testing Accuracy:",
          sess.run(accuracy, feed_dict={x: test_data, y: test_label}))
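
As a side note (not part of the official example): TF 1.x also provides
tf.nn.bidirectional_dynamic_rnn, which consumes the (batch, n_steps, n_input)
tensor directly, so the tf.unstack call becomes unnecessary. A minimal sketch,
with the hypothetical scope name "birnn_dyn" added to avoid variable-name
clashes with the static version:

def BiRNN_dynamic(x, weights, biases):
    lstm_fw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    lstm_bw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    # outputs is a pair (output_fw, output_bw), each of shape
    # (batch_size, n_steps, n_hidden) in batch-major order.
    (output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(
        lstm_fw_cell, lstm_bw_cell, x, dtype=tf.float32, scope="birnn_dyn")
    # Concatenate both directions at the last time step, matching
    # outputs[-1] of the static version above.
    last = tf.concat([output_fw[:, -1, :], output_bw[:, -1, :]], axis=1)
    return tf.matmul(last, weights['out']) + biases['out']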

The official BiLSTM example is written quite clearly. Since this was my first time reading it, I still had to look up a lot of things, especially around data handling.

Data handling (https://segmentfault.com/a/1190000008793389)

Concatenation and stacking

t1 = [[1, 2, 3], [4, 5, 6]]
t2 = [[7, 8, 9], [10, 11, 12]]
tf.concat([t1, t2], 0) ==> [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tf.concat([t1, t2], 1) ==> [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]]
tf.stack([t1, t2], 0) ==> [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]
tf.stack([t1, t2], 1) ==> [[[1, 2, 3], [7, 8, 9]], [[4, 5, 6], [10, 11, 12]]]
tf.stack([t1, t2], 2) ==> [[[1, 7], [2, 8], [3, 9]], [[4, 10], [5, 11], [6, 12]]]

From the shape perspective:

t1 = [[1, 2, 3], [4, 5, 6]]
t2 = [[7, 8, 9], [10, 11, 12]]
tf.concat([t1, t2], 0) # [2,3] + [2,3] ==> [4, 3]
tf.concat([t1, t2], 1) # [2,3] + [2,3] ==> [2, 6]
tf.stack([t1, t2], 0) # [2,3] + [2,3] ==> [2*,2,3]
tf.stack([t1, t2], 1) # [2,3] + [2,3] ==> [2,2*,3]
tf.stack([t1, t2], 2) # [2,3] + [2,3] ==> [2,3,2*]
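
A quick runnable check of the shapes above (a sketch, assuming TF 1.x, where
eager execution is off and a Session is needed to evaluate):

import tensorflow as tf

t1 = [[1, 2, 3], [4, 5, 6]]
t2 = [[7, 8, 9], [10, 11, 12]]
with tf.Session() as sess:
    print(sess.run(tf.concat([t1, t2], 0)).shape)  # (4, 3)
    print(sess.run(tf.concat([t1, t2], 1)).shape)  # (2, 6)
    print(sess.run(tf.stack([t1, t2], 0)).shape)   # (2, 2, 3)
    print(sess.run(tf.stack([t1, t2], 1)).shape)   # (2, 2, 3)
    print(sess.run(tf.stack([t1, t2], 2)).shape)   # (2, 3, 2)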

Extraction:

input = [[[1, 1, 1], [2, 2, 2]],
         [[3, 3, 3], [4, 4, 4]],
         [[5, 5, 5], [6, 6, 6]]]
tf.slice(input, [1, 0, 0], [1, 1, 3]) ==> [[[3, 3, 3]]]
tf.slice(input, [1, 0, 0], [1, 2, 3]) ==> [[[3, 3, 3],
                                            [4, 4, 4]]]
tf.slice(input, [1, 0, 0], [2, 1, 3]) ==> [[[3, 3, 3]],
                                           [[5, 5, 5]]]

tf.gather(input, [0, 2]) ==> [[[1, 1, 1], [2, 2, 2]],
                              [[5, 5, 5], [6, 6, 6]]]
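
And the same kind of runnable check for tf.slice and tf.gather (again a
sketch, assuming TF 1.x). tf.slice takes a begin index and a size per
dimension; tf.gather picks out whole sub-tensors along axis 0:

import tensorflow as tf

inp = tf.constant([[[1, 1, 1], [2, 2, 2]],
                   [[3, 3, 3], [4, 4, 4]],
                   [[5, 5, 5], [6, 6, 6]]])
with tf.Session() as sess:
    print(sess.run(tf.slice(inp, [1, 0, 0], [1, 1, 3])))  # [[[3 3 3]]]
    print(sess.run(tf.gather(inp, [0, 2])))               # sub-tensors 0 and 2 along axis 0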
