There are four main files:

mnist_train.py

#coding: utf-8
import os

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

import mnist_inference

BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 10000
MOVING_AVERAGE_DECAY = 0.99
MODEL_SAVE_PATH = "./mobilenet_v1_model/"
MODEL_NAME = "model.ckpt"
channels = 1


def train_MLP(mnist):
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)

    y = mnist_inference.inference_MLP(x, regularizer)
    global_step = tf.Variable(0, trainable=False)

    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            if i % 1000 == 0:
                print("After %d training step(s), loss on training batch is %g." % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)


def train_mobilenet(mnist):
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)

    # MobileNet expects NHWC image input, so reshape the flat 784-vector
    # back to 28x28x1 and pad it up to 112x112.
    x_image = tf.reshape(x, [-1, 28, 28, 1])
    x_image = tf.image.resize_image_with_crop_or_pad(x_image, 28 * 4, 28 * 4)

    y = mnist_inference.inference_mobilenet(x_image, regularizer)
    global_step = tf.Variable(0, trainable=False)

    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean  # + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            if i % 1000 == 0:
                print("After %d training step(s), loss on training batch is %g." % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)
            else:
                print("After %d training step(s), loss on training batch is %g." % (step, loss_value))


def main(argv=None):
    mnist = input_data.read_data_sets("../MNIST_data", one_hot=True)
    train_mobilenet(mnist)


if __name__ == '__main__':
    tf.app.run()
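A note on the train_op construction above: tf.control_dependencies bundles the gradient step and the moving-average update, so a single sess.run(train_op) performs both. Below is a minimal sketch of that pattern in isolation (TensorFlow 1.x assumed; the variable and op names are illustrative, not taken from the files above):

import tensorflow as tf

v = tf.Variable(0.0, name='v')
update_v = tf.assign_add(v, 1.0)            # stands in for the optimizer step
ema = tf.train.ExponentialMovingAverage(0.99)
ema_op = ema.apply([v])                     # maintains a shadow copy of v
with tf.control_dependencies([update_v, ema_op]):
    train_op = tf.no_op(name='train')       # running this triggers both updates

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_op)
    # The shadow variable trails v: shadow = decay * shadow + (1 - decay) * v.
    print(sess.run([v, ema.average(v)]))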

mnist_eval.py

#coding: utf-8
import time

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

import mnist_inference
import mnist_train

# Every 10 seconds, load the newest model.
EVAL_INTERVAL_SECS = 10


def evaluate_MLP(mnist):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
        validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}

        y = mnist_inference.inference_MLP(x, None)
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        variable_averages = tf.train.ExponentialMovingAverage(mnist_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # while True:
        if 1:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    # Load the model.
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                    print("After %s training steps, validation accuracy = %g" % (global_step, accuracy_score))
                else:
                    print('No checkpoint file found')
                    return
            # time.sleep(EVAL_INTERVAL_SECS)


def evaluate_mobilenet(mnist):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')

        # MobileNet expects NHWC image input, so reshape the flat 784-vector
        # back to 28x28x1 and pad it up to 112x112.
        x_image = tf.reshape(x, [-1, 28, 28, 1])
        x_image = tf.image.resize_image_with_crop_or_pad(x_image, 28 * 4, 28 * 4)

        y = mnist_inference.inference_mobilenet(x_image, None)
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        variable_averages = tf.train.ExponentialMovingAverage(mnist_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        images = mnist.validation.images
        labels = mnist.validation.labels
        batch_size = 100
        TEST_STEPS = images.shape[0] // batch_size
        sum_accuracy = 0.0

        # while True:
        if 1:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    # Load the model.
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    # Evaluate the validation set one batch at a time.
                    for i in range(TEST_STEPS):
                        input_batch = images[i * batch_size: (i + 1) * batch_size, :]
                        label_batch = labels[i * batch_size: (i + 1) * batch_size, :]
                        validate_feed = {x: input_batch, y_: label_batch}
                        accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                        sum_accuracy += accuracy_score
                        print("test %s batch steps, validation accuracy = %g" % (i, accuracy_score))
                else:
                    print('No checkpoint file found')
                    return
            # time.sleep(EVAL_INTERVAL_SECS)

        print("After %s training steps, all validation accuracy = %g" % (global_step, sum_accuracy / TEST_STEPS))


def main(argv=None):
    mnist = input_data.read_data_sets("../MNIST_data", one_hot=True)
    evaluate_mobilenet(mnist)


if __name__ == '__main__':
    tf.app.run()
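The key piece in evaluation is variables_to_restore(): it maps the shadow (moving-average) names stored in the checkpoint onto the graph's ordinary variables, so the weights being evaluated are the averaged ones. A minimal sketch of the mapping it builds (TensorFlow 1.x assumed; names illustrative):

import tensorflow as tf

v = tf.Variable(1.0, name='v')
ema = tf.train.ExponentialMovingAverage(0.99)
ema.apply([v])
# Prints something like {'v/ExponentialMovingAverage': <tf.Variable 'v:0' ...>},
# i.e. the checkpoint entry 'v/ExponentialMovingAverage' is restored into v.
print(ema.variables_to_restore())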

mnist_inference.py

#coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

import mobilenet_v1

slim = tf.contrib.slim

# Define the sizes of the neural network layers.
INPUT_NODE = 784
OUTPUT_NODE = 10
LAYER1_NODE = 500


def get_weight_variable(shape, regularizer):
    weights = tf.get_variable(
        "weights", shape,
        initializer=tf.truncated_normal_initializer(stddev=0.1))
    if regularizer is not None:
        tf.add_to_collection('losses', regularizer(weights))
    return weights


# Define the forward pass of the MLP network.
def inference_MLP(input_tensor, regularizer):
    with tf.variable_scope('layer1'):
        weights = get_weight_variable([INPUT_NODE, LAYER1_NODE], regularizer)
        biases = tf.get_variable("biases", [LAYER1_NODE], initializer=tf.constant_initializer(0.0))
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights) + biases)

    with tf.variable_scope('layer2'):
        weights = get_weight_variable([LAYER1_NODE, OUTPUT_NODE], regularizer)
        biases = tf.get_variable("biases", [OUTPUT_NODE], initializer=tf.constant_initializer(0.0))
        layer2 = tf.matmul(layer1, weights) + biases

    return layer2


# Define the forward pass with mobilenet_v1.
def inference_mobilenet(input_tensor, regularizer):
    with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                        normalizer_fn=slim.batch_norm):
        logits, end_points = mobilenet_v1.mobilenet_v1(
            input_tensor,
            num_classes=OUTPUT_NODE,
            dropout_keep_prob=0.8,
            is_training=True,
            min_depth=8,
            depth_multiplier=1.0,
            conv_defs=None,
            prediction_fn=tf.contrib.layers.softmax,
            spatial_squeeze=True,
            reuse=None,
            scope='MobilenetV1',
            global_pool=False)
    return logits
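Note the input contract of inference_mobilenet: it expects an NHWC image batch, which is why both callers reshape the flat 784-vector to 28x28x1 and grow it to 112x112 (28 * 4). resize_image_with_crop_or_pad zero-pads around the centered digit rather than scaling it. A quick shape check (a sketch, not part of the files above):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 784])
x_image = tf.reshape(x, [-1, 28, 28, 1])
x_image = tf.image.resize_image_with_crop_or_pad(x_image, 112, 112)
print(x_image.shape)  # (?, 112, 112, 1): the 28x28 digit sits centered in zero padding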

mobilenet_v1.py

Download it from:

https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.py
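If you prefer to fetch it from a script, here is a sketch using GitHub's raw-file URL (assuming the file still lives at this path on master):

import urllib.request

url = ("https://raw.githubusercontent.com/tensorflow/models/"
       "master/research/slim/nets/mobilenet_v1.py")
urllib.request.urlretrieve(url, "mobilenet_v1.py")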
