# -*- coding: utf-8 -*-
'''
Created on Apr 21, 2017 @author: P0079482
'''
import tensorflow as tf

# Parameters describing the network structure.
INPUT_NODE = 784
OUTPUT_NODE = 10
LAYER1_NODE = 500

# Variables are obtained through tf.get_variable. They are created while training
# the network; at test time their values are loaded from the saved model. More
# conveniently, since variables can be renamed as they are loaded, the moving
# averages can take over the original names: the same name refers to the variable
# itself during training and to its moving average at test time. This function
# also adds the variable's regularization loss to a loss collection.
def get_weight_variable(shape, regularizer):
    weights = tf.get_variable("weights", shape,
                              initializer=tf.truncated_normal_initializer(stddev=0.1))
    # When a regularizer is given, add this variable's regularization loss to the
    # collection named 'losses'. add_to_collection adds a tensor to a collection;
    # 'losses' is a custom collection, not one of the collections TensorFlow
    # manages automatically.
    if regularizer is not None:
        tf.add_to_collection('losses', regularizer(weights))
    return weights

# Forward propagation of the network.
def inference(input_tensor, regularizer):
    # Declare the first layer's variables and compute its forward pass.
    with tf.variable_scope('layer1'):
        # Here there is no essential difference between tf.get_variable and
        # tf.Variable, because this function is not called more than once in the
        # same program during training or testing. If it were called multiple
        # times, reuse would have to be set to True after the first call.
        weights = get_weight_variable([INPUT_NODE, LAYER1_NODE], regularizer)
        biases = tf.get_variable("biases", [LAYER1_NODE],
                                 initializer=tf.constant_initializer(0.0))
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights) + biases)
    # Declare the second layer's variables and compute its forward pass likewise.
    with tf.variable_scope('layer2'):
        weights = get_weight_variable([LAYER1_NODE, OUTPUT_NODE], regularizer)
        biases = tf.get_variable("biases", [OUTPUT_NODE],
                                 initializer=tf.constant_initializer(0.0))
        layer2 = tf.matmul(layer1, weights) + biases
    # Return the result of the forward pass.
    return layer2

The code above defines the network's forward propagation. Both training and testing can call inference directly, without caring about the concrete network structure. Using this forward pass, the next program, mnist_train.py, trains the network.

The code above is mnist_inference.py. It defines the computation of the network's forward pass and encapsulates the L2 regularization loss of the weights.
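
The comment inside inference notes that a second call in the same graph would need reuse set to True. As a minimal sketch of that point (not part of the original three files; it assumes the mnist_inference module above is importable):

import tensorflow as tf
import mnist_inference

x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
# The first call creates layer1/weights, layer1/biases, layer2/weights, layer2/biases.
logits = mnist_inference.inference(x, None)
# A second call in the same graph must reuse those variables instead of creating new ones.
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
    logits_again = mnist_inference.inference(x, None)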

'''
Created on Apr 21, 2017 @author: weizhen
'''
import os
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Load the constants and forward-propagation function defined in mnist_inference.py.
import mnist_inference

# Configuration parameters of the network.
BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 30000
MOVING_AVERAGE_DECAY = 0.99
# Path and file name under which the model is saved.
MODEL_SAVE_PATH = "/path/to/model/"
MODEL_NAME = "model.ckpt"

def train(mnist):
    # Define the input and output placeholders.
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    # Use the forward pass defined in mnist_inference.py directly.
    y = mnist_inference.inference(x, regularizer)
    global_step = tf.Variable(0, trainable=False)
    # Define the loss function, learning rate, moving-average op, and training
    # step, much as in the example of section 5.2.1.
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step,
                                               mnist.train.num_examples / BATCH_SIZE,
                                               LEARNING_RATE_DECAY)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')
    # Initialize TensorFlow's persistence class.
    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        # The model is no longer evaluated on the validation data during training;
        # validation and testing are handled by a separate program.
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            # Save the model every 1000 steps.
            if i % 1000 == 0:
                # Report the current training state. Only the loss on the current
                # training batch is printed, which gives a rough picture of how
                # training is going; validation accuracy is produced by a
                # separate program.
                print("After %d training step(s),loss on training batch is %g" % (step, loss_value))
                # Save the current model. Passing global_step appends the step
                # count to each saved model's file name; for example,
                # "model.ckpt-1000" is the model after 1000 training steps.
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)

def main(argv=None):
    mnist = input_data.read_data_sets("/tmp/data", one_hot=True)
    train(mnist)

if __name__ == '__main__':
    tf.app.run()

The code above is mnist_train.py. It does the following:

1. Encapsulates the computation of the network's moving averages (see the numeric sketch after this list).

2. Computes the network loss: the cross-entropy of the predictions plus the L2 regularization term on the weights.

3. Iteratively optimizes the loss with gradient descent under an exponentially decaying learning rate.

4. Uses ReLU as the hidden-layer activation function.

5. Applies softmax to the output layer so that the cross-entropy between the predictions and the true labels can be computed.

6. Persists the model: the result is saved every 1000 training steps under the /path/to/model/ directory.

7. At test time, the saved graph is read back and evaluated on the test data.
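
A rough numeric sketch of the update rules behind points 1 and 3, using the constants from mnist_train.py (it assumes the standard 55,000-example MNIST training split; note also that ExponentialMovingAverage, when given a num_updates argument as here, uses min(decay, (1 + step) / (10 + step)) as the effective decay, so shadow values track the raw variables closely early in training):

# Learning rate produced by tf.train.exponential_decay (staircase=False, the default):
#   lr = LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (global_step / decay_steps)
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
decay_steps = 55000 / 100              # mnist.train.num_examples / BATCH_SIZE = 550
lr = LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (1000 / decay_steps)
print(lr)                              # ~0.786 after 1000 steps

# Moving-average update applied by variable_averages_op at every training step:
#   shadow = decay * shadow + (1 - decay) * variable
decay, shadow, variable = 0.99, 1.0, 0.5   # illustrative values
shadow = decay * shadow + (1 - decay) * variable
print(shadow)                          # 0.995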

The training output is shown below:

Extracting /tmp/data\train-images-idx3-ubyte.gz
Extracting /tmp/data\train-labels-idx1-ubyte.gz
Extracting /tmp/data\t10k-images-idx3-ubyte.gz
Extracting /tmp/data\t10k-labels-idx1-ubyte.gz
E c:\tf_jenkins\home\workspace\release-win\device\cpu\os\windows\tensorflow\core\framework\op_kernel.cc:943] OpKernel ('op: "BestSplits" device_type: "CPU"') for unknown op: BestSplits
E c:\tf_jenkins\home\workspace\release-win\device\cpu\os\windows\tensorflow\core\framework\op_kernel.cc:943] OpKernel ('op: "CountExtremelyRandomStats" device_type: "CPU"') for unknown op: CountExtremelyRandomStats
E c:\tf_jenkins\home\workspace\release-win\device\cpu\os\windows\tensorflow\core\framework\op_kernel.cc:943] OpKernel ('op: "FinishedNodes" device_type: "CPU"') for unknown op: FinishedNodes
E c:\tf_jenkins\home\workspace\release-win\device\cpu\os\windows\tensorflow\core\framework\op_kernel.cc:943] OpKernel ('op: "GrowTree" device_type: "CPU"') for unknown op: GrowTree
E c:\tf_jenkins\home\workspace\release-win\device\cpu\os\windows\tensorflow\core\framework\op_kernel.cc:943] OpKernel ('op: "ReinterpretStringToFloat" device_type: "CPU"') for unknown op: ReinterpretStringToFloat
E c:\tf_jenkins\home\workspace\release-win\device\cpu\os\windows\tensorflow\core\framework\op_kernel.cc:943] OpKernel ('op: "SampleInputs" device_type: "CPU"') for unknown op: SampleInputs
E c:\tf_jenkins\home\workspace\release-win\device\cpu\os\windows\tensorflow\core\framework\op_kernel.cc:943] OpKernel ('op: "ScatterAddNdim" device_type: "CPU"') for unknown op: ScatterAddNdim
E c:\tf_jenkins\home\workspace\release-win\device\cpu\os\windows\tensorflow\core\framework\op_kernel.cc:943] OpKernel ('op: "TopNInsert" device_type: "CPU"') for unknown op: TopNInsert
E c:\tf_jenkins\home\workspace\release-win\device\cpu\os\windows\tensorflow\core\framework\op_kernel.cc:943] OpKernel ('op: "TopNRemove" device_type: "CPU"') for unknown op: TopNRemove
E c:\tf_jenkins\home\workspace\release-win\device\cpu\os\windows\tensorflow\core\framework\op_kernel.cc:943] OpKernel ('op: "TreePredictions" device_type: "CPU"') for unknown op: TreePredictions
E c:\tf_jenkins\home\workspace\release-win\device\cpu\os\windows\tensorflow\core\framework\op_kernel.cc:943] OpKernel ('op: "UpdateFertileSlots" device_type: "CPU"') for unknown op: UpdateFertileSlots
WARNING:tensorflow:From C:\Users\weizhen\workspace\TextUtil\mnist_train.py:49: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.
Instructions for updating:
Use `tf.global_variables_initializer` instead.
After 1 training step(s),loss on training batch is 3.18871
After 1001 training step(s),loss on training batch is 0.236992
After 2001 training step(s),loss on training batch is 0.184191
After 3001 training step(s),loss on training batch is 0.141068
After 4001 training step(s),loss on training batch is 0.120774
After 5001 training step(s),loss on training batch is 0.11057
After 6001 training step(s),loss on training batch is 0.0951077
After 7001 training step(s),loss on training batch is 0.128405
After 8001 training step(s),loss on training batch is 0.0825269
After 9001 training step(s),loss on training batch is 0.0735326
After 10001 training step(s),loss on training batch is 0.0714079
After 11001 training step(s),loss on training batch is 0.0640013
After 12001 training step(s),loss on training batch is 0.0633993
After 13001 training step(s),loss on training batch is 0.0620059
After 14001 training step(s),loss on training batch is 0.0566063
After 15001 training step(s),loss on training batch is 0.0471778
After 16001 training step(s),loss on training batch is 0.0527759
After 17001 training step(s),loss on training batch is 0.0543462
After 18001 training step(s),loss on training batch is 0.0416694
After 19001 training step(s),loss on training batch is 0.0472751
After 20001 training step(s),loss on training batch is 0.0419436
After 21001 training step(s),loss on training batch is 0.0384671
After 22001 training step(s),loss on training batch is 0.0391581
After 23001 training step(s),loss on training batch is 0.0355204
After 24001 training step(s),loss on training batch is 0.0357683
After 25001 training step(s),loss on training batch is 0.0357087
After 26001 training step(s),loss on training batch is 0.0377925
After 27001 training step(s),loss on training batch is 0.0336796
After 28001 training step(s),loss on training batch is 0.0417044
After 29001 training step(s),loss on training batch is 0.0374377

Testing the trained model:

'''
Created on Apr 22, 2017 @author: weizhen
'''
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Load the constants and functions defined in mnist_inference.py and mnist_train.py.
import mnist_inference
import mnist_train

# Load the latest model every 10 seconds and evaluate its accuracy on the
# validation data.
EVAL_INTERVAL_SECS = 10

def evaluate(mnist):
    with tf.Graph().as_default() as g:
        # Define the format of the inputs and outputs.
        x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
        validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}
        # Compute the forward pass by calling the shared function. The value of
        # the regularization loss does not matter at test time, so the
        # regularizer is set to None.
        y = mnist_inference.inference(x, None)
        # Compute the accuracy from the forward-pass result. To classify unseen
        # samples, tf.argmax(y, 1) gives each input's predicted class.
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        # Load the model through variable renaming, so the forward pass does not
        # have to call the moving-average functions itself; the evaluation fully
        # reuses the forward pass defined in mnist_inference.py.
        variable_averages = tf.train.ExponentialMovingAverage(mnist_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)
        # Every EVAL_INTERVAL_SECS seconds, recompute the accuracy to track how
        # it changes over the course of training.
        while True:
            with tf.Session() as sess:
                # tf.train.get_checkpoint_state finds the newest model in the
                # directory through the checkpoint file.
                ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    # Load the model.
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # Recover the number of training steps from the file name.
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                    print("After %s training step(s) validation accuracy=%g" % (global_step, accuracy_score))
                else:
                    print('No checkpoint file found')
                    return
            time.sleep(EVAL_INTERVAL_SECS)

def main(argv=None):
    mnist = input_data.read_data_sets("/tmp/data", one_hot=True)
    evaluate(mnist)

if __name__ == '__main__':
    tf.app.run()
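
For reference, the variables_to_restore() call in mnist_eval.py builds a dictionary mapping each shadow variable's checkpoint name to the corresponding variable in the current graph, which is what lets the Saver substitute the averaged values transparently. A minimal sketch (the printed mapping is illustrative of TF 1.x naming, not taken from the original run):

import tensorflow as tf

v = tf.Variable(0.0, name='layer1/weights')
ema = tf.train.ExponentialMovingAverage(0.99)
# Keys are the shadow-variable names saved during training; values are the
# variables in this graph that receive the averaged values on restore.
print(ema.variables_to_restore())
# {'layer1/weights/ExponentialMovingAverage': <tf.Variable 'layer1/weights:0' ...>}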
