The best-practice version of this example splits the work into two standalone scripts. The training script below defines the inference graph once, adds L2 regularization and an exponential moving average over all trainable variables, and checkpoints the model every 1,000 steps.

import os

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

INPUT_NODE = 784
OUTPUT_NODE = 10
LAYER1_NODE = 500


def get_weight_variable(shape, regularizer):
    weights = tf.get_variable(
        "weights", shape,
        initializer=tf.truncated_normal_initializer(stddev=0.1))
    # When a regularizer is given, add the weight penalty to the 'losses'
    # collection so it can be summed into the total loss later.
    if regularizer is not None:
        tf.add_to_collection('losses', regularizer(weights))
    return weights


def inference(input_tensor, regularizer):
    # Hidden layer: 784 -> 500, ReLU activation.
    with tf.variable_scope('layer1'):
        weights = get_weight_variable([INPUT_NODE, LAYER1_NODE], regularizer)
        biases = tf.get_variable(
            "biases", [LAYER1_NODE], initializer=tf.constant_initializer(0.0))
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights) + biases)

    # Output layer: 500 -> 10 raw logits; softmax is applied inside the loss.
    with tf.variable_scope('layer2'):
        weights = get_weight_variable([LAYER1_NODE, OUTPUT_NODE], regularizer)
        biases = tf.get_variable(
            "biases", [OUTPUT_NODE], initializer=tf.constant_initializer(0.0))
        layer2 = tf.matmul(layer1, weights) + biases

    return layer2


BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 30000
MOVING_AVERAGE_DECAY = 0.99
MODEL_SAVE_PATH = "E:\\MNIST_model\\"
MODEL_NAME = "mnist_model"


def train(mnist):
    # Define the input and output placeholders.
    x = tf.placeholder(tf.float32, [None, INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, OUTPUT_NODE], name='y-input')

    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = inference(x, regularizer)
    global_step = tf.Variable(0, trainable=False)

    # Define the loss function, learning rate, moving-average op, and training step.
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY,
        staircase=True)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)
    # Run the gradient update and the moving-average update as a single op.
    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name='train')

    # Initialize the TensorFlow Saver for persistence.
    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run(
                [train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            if i % 1000 == 0:
                print("After %d training step(s), loss on training batch is %g." % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)


def main(argv=None):
    mnist = input_data.read_data_sets("E:\\MNIST_data\\", one_hot=True)
    train(mnist)


if __name__ == '__main__':
    main()
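Because global_step is passed to tf.train.ExponentialMovingAverage as num_updates, the effective decay is capped early in training, which lets the shadow variables catch up with the fast-moving weights. Below is a minimal plain-Python sketch of that update rule; the numbers are made up for illustration, and the real update happens inside variables_averages_op.

MOVING_AVERAGE_DECAY = 0.99

def ema_update(shadow, variable, step):
    # With num_updates supplied, TensorFlow uses
    #   decay = min(MOVING_AVERAGE_DECAY, (1 + step) / (10 + step))
    # and then updates shadow = decay * shadow + (1 - decay) * variable.
    decay = min(MOVING_AVERAGE_DECAY, (1.0 + step) / (10.0 + step))
    return decay * shadow + (1.0 - decay) * variable

shadow = 5.0  # hypothetical shadow value for a single weight
for step, weight in enumerate([4.0, 3.5, 3.2]):
    shadow = ema_update(shadow, weight, step)
    print(step, shadow)  # the shadow tracks the weight quickly while decay is capped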

The companion evaluation script is self-contained, so it repeats the constants and the inference code. Every EVAL_INTERVAL_SECS seconds it reloads the latest checkpoint and reports accuracy on the validation set, restoring the moving-average shadow values in place of the raw weights.

import os
import time

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

INPUT_NODE = 784
OUTPUT_NODE = 10
LAYER1_NODE = 500

BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 30000
MOVING_AVERAGE_DECAY = 0.99
MODEL_SAVE_PATH = "E:\\MNIST_model\\"
MODEL_NAME = "mnist_model"


def get_weight_variable(shape, regularizer):
    weights = tf.get_variable(
        "weights", shape,
        initializer=tf.truncated_normal_initializer(stddev=0.1))
    if regularizer is not None:
        tf.add_to_collection('losses', regularizer(weights))
    return weights


def inference(input_tensor, regularizer):
    with tf.variable_scope('layer1'):
        weights = get_weight_variable([INPUT_NODE, LAYER1_NODE], regularizer)
        biases = tf.get_variable(
            "biases", [LAYER1_NODE], initializer=tf.constant_initializer(0.0))
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights) + biases)

    with tf.variable_scope('layer2'):
        weights = get_weight_variable([LAYER1_NODE, OUTPUT_NODE], regularizer)
        biases = tf.get_variable(
            "biases", [OUTPUT_NODE], initializer=tf.constant_initializer(0.0))
        layer2 = tf.matmul(layer1, weights) + biases

    return layer2


# Interval, in seconds, between two consecutive checkpoint reloads.
EVAL_INTERVAL_SECS = 10


def evaluate(mnist):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, INPUT_NODE], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, OUTPUT_NODE], name='y-input')
        validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}

        # No regularization loss is needed at evaluation time.
        y = inference(x, None)
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # Restore the moving-average (shadow) values in place of the raw weights.
        variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        while True:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # Recover the training step from the checkpoint file name.
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                    print("After %s training step(s), validation accuracy = %g" % (global_step, accuracy_score))
                else:
                    print('No checkpoint file found')
                    return
            time.sleep(EVAL_INTERVAL_SECS)


def main(argv=None):
    mnist = input_data.read_data_sets("E:\\MNIST_data\\", one_hot=True)
    evaluate(mnist)


if __name__ == '__main__':
    main()
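The key line is tf.train.Saver(variables_to_restore): the dict returned by variables_to_restore() maps each variable's shadow name in the checkpoint to the live variable, so restoring loads the averaged values into the ordinary weights. A minimal standalone sketch (TF 1.x, not part of the original scripts) showing the mapping:

import tensorflow as tf

v = tf.Variable(0.0, name="v")
ema = tf.train.ExponentialMovingAverage(0.99)
# Prints a dict like {'v/ExponentialMovingAverage': <tf.Variable 'v:0' ...>},
# i.e. the checkpoint entry 'v/ExponentialMovingAverage' is restored into v.
print(ema.variables_to_restore())

Since the training script saves a checkpoint every 1,000 steps while this loop polls tf.train.get_checkpoint_state every EVAL_INTERVAL_SECS seconds, the two scripts can run side by side, with the evaluator always scoring the latest saved model.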
