The project consists mainly of four files:

mnist_train.py

#coding: utf-8
import os

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

import mnist_inference

BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 10000
MOVING_AVERAGE_DECAY = 0.99
MODEL_SAVE_PATH = "./mobilenet_v1_model/"
MODEL_NAME = "model.ckpt"
channels = 1


def train_MLP(mnist):
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = mnist_inference.inference_MLP(x, regularizer)

    global_step = tf.Variable(0, trainable=False)
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())

    # the labels are one-hot, so convert them to class indices for the sparse loss
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))

    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE, global_step,
        mnist.train.num_examples / BATCH_SIZE, LEARNING_RATE_DECAY)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step],
                                           feed_dict={x: xs, y_: ys})
            if i % 1000 == 0:
                print("After %d training step(s), loss on training batch is %g." % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)


def train_mobilenet(mnist):
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)

    # MobileNet expects NHWC images, so reshape the flat 784-vector and pad
    # the 28x28 digits up to 112x112
    x_image = tf.reshape(x, [-1, 28, 28, 1])
    x_image = tf.image.resize_image_with_crop_or_pad(x_image, 28 * 4, 28 * 4)
    y = mnist_inference.inference_mobilenet(x_image, regularizer)

    global_step = tf.Variable(0, trainable=False)
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())

    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean  # + tf.add_n(tf.get_collection('losses'))

    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE, global_step,
        mnist.train.num_examples / BATCH_SIZE, LEARNING_RATE_DECAY)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step],
                                           feed_dict={x: xs, y_: ys})
            if i % 1000 == 0:
                print("After %d training step(s), loss on training batch is %g." % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)
            else:
                print("After %d training step(s), loss on training batch is %g." % (step, loss_value))


def main(argv=None):
    mnist = input_data.read_data_sets("../MNIST_data", one_hot=True)
    train_mobilenet(mnist)


if __name__ == '__main__':
    tf.app.run()
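For reference, tf.train.exponential_decay with the default staircase=False computes learning_rate = LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (global_step / decay_steps). A minimal standalone sketch of the schedule used above (assuming MNIST's usual 55,000-example training split; the numbers are illustrative only):

# Sketch: the decayed learning rate used in train_MLP/train_mobilenet.
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
decay_steps = 55000 / 100  # mnist.train.num_examples / BATCH_SIZE

for step in (0, 1000, 5000, 10000):
    lr = LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (step / decay_steps)
    print("step %5d -> learning rate %.4f" % (step, lr))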

mnist_eval.py

#coding: utf-8
import time

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

import mnist_inference
import mnist_train

# every 10 sec load the newest model
EVAL_INTERVAL_SECS = 10


def evaluate_MLP(mnist):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
        validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}

        y = mnist_inference.inference_MLP(x, None)
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # restore the shadow (moving-average) versions of the trained variables
        variable_averages = tf.train.ExponentialMovingAverage(mnist_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # while True:  # uncomment to re-evaluate every EVAL_INTERVAL_SECS
        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)
            if ckpt and ckpt.model_checkpoint_path:
                # load the model
                saver.restore(sess, ckpt.model_checkpoint_path)
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                print("After %s training steps, validation accuracy = %g" % (global_step, accuracy_score))
            else:
                print('No checkpoint file found')
                return
            # time.sleep(EVAL_INTERVAL_SECS)


def evaluate_mobilenet(mnist):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')

        # MobileNet expects NHWC images, so reshape the flat 784-vector and pad
        # the 28x28 digits up to 112x112
        x_image = tf.reshape(x, [-1, 28, 28, 1])
        x_image = tf.image.resize_image_with_crop_or_pad(x_image, 28 * 4, 28 * 4)
        y = mnist_inference.inference_mobilenet(x_image, None)

        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        variable_averages = tf.train.ExponentialMovingAverage(mnist_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        images = mnist.validation.images
        labels = mnist.validation.labels
        batch_size = 100
        TEST_STEPS = images.shape[0] // batch_size
        sum_accuracy = 0.0

        # while True:  # uncomment to re-evaluate every EVAL_INTERVAL_SECS
        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)
            if ckpt and ckpt.model_checkpoint_path:
                # load the model
                saver.restore(sess, ckpt.model_checkpoint_path)
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                # run the validation set through in batches to limit memory use
                for i in range(TEST_STEPS):
                    input_batch = images[i * batch_size:(i + 1) * batch_size, :]
                    label_batch = labels[i * batch_size:(i + 1) * batch_size, :]
                    validate_feed = {x: input_batch, y_: label_batch}
                    accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                    sum_accuracy += accuracy_score
                    print("test %s batch steps, validation accuracy = %g" % (i, accuracy_score))
            else:
                print('No checkpoint file found')
                return
            # time.sleep(EVAL_INTERVAL_SECS)

        print("After %s training steps, all validation accuracy = %g" % (global_step, sum_accuracy / TEST_STEPS))


def main(argv=None):
    mnist = input_data.read_data_sets("../MNIST_data", one_hot=True)
    evaluate_mobilenet(mnist)


if __name__ == '__main__':
    tf.app.run()
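Note how the step count is recovered: tf.train.Saver appends "-<global_step>" to the checkpoint filename when saving, so the evaluation script parses it back out of the path. A small illustration with a hypothetical path:

# Hypothetical path, in the form produced by saver.save(..., global_step=...):
model_checkpoint_path = "./mobilenet_v1_model/model.ckpt-10000"

# The same parse used in the evaluation code above:
global_step = model_checkpoint_path.split('/')[-1].split('-')[-1]
print(global_step)  # prints "10000"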

mnist_inference.py

#coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

import mobilenet_v1

slim = tf.contrib.slim

# define the sizes of the neural network
INPUT_NODE = 784
OUTPUT_NODE = 10
LAYER1_NODE = 500


def get_weight_variable(shape, regularizer):
    weights = tf.get_variable("weights", shape,
                              initializer=tf.truncated_normal_initializer(stddev=0.1))
    if regularizer is not None:
        tf.add_to_collection('losses', regularizer(weights))
    return weights


# define the forward pass of the MLP network
def inference_MLP(input_tensor, regularizer):
    with tf.variable_scope('layer1'):
        weights = get_weight_variable([INPUT_NODE, LAYER1_NODE], regularizer)
        biases = tf.get_variable("biases", [LAYER1_NODE], initializer=tf.constant_initializer(0.0))
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights) + biases)
    with tf.variable_scope('layer2'):
        weights = get_weight_variable([LAYER1_NODE, OUTPUT_NODE], regularizer)
        biases = tf.get_variable("biases", [OUTPUT_NODE], initializer=tf.constant_initializer(0.0))
        layer2 = tf.matmul(layer1, weights) + biases
    return layer2


# define the forward pass of mobilenet_v1
def inference_mobilenet(input_tensor, regularizer):
    with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                        normalizer_fn=slim.batch_norm):
        logits, end_points = mobilenet_v1.mobilenet_v1(
            input_tensor,
            num_classes=OUTPUT_NODE,
            dropout_keep_prob=0.8,
            is_training=True,
            min_depth=8,
            depth_multiplier=1.0,
            conv_defs=None,
            prediction_fn=tf.contrib.layers.softmax,
            spatial_squeeze=True,
            reuse=None,
            scope='MobilenetV1',
            global_pool=False)
    return logits
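One caveat: inference_mobilenet always builds the network with is_training=True, so dropout and batch norm stay in training mode even when the evaluation script calls it. A minimal sketch of a variant that threads the flag through (an assumption about how one might restructure it, not part of the original files):

# Sketch: accept an is_training flag so evaluation can disable dropout
# and use the frozen batch-norm statistics.
def inference_mobilenet(input_tensor, regularizer, is_training=True):
    with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                        normalizer_fn=slim.batch_norm):
        logits, end_points = mobilenet_v1.mobilenet_v1(
            input_tensor,
            num_classes=OUTPUT_NODE,
            dropout_keep_prob=0.8,
            is_training=is_training,
            scope='MobilenetV1')
    return logits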

mobilenet_v1.py

Download it from here:

https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.py
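MobileNet v1 is built by stacking depthwise separable convolutions: each block applies a per-channel 3x3 depthwise filter, then a 1x1 pointwise convolution that mixes channels, which is what makes it cheap relative to standard convolutions. A rough slim-based sketch of one such block (illustrative only; mobilenet_v1.py itself wires the blocks up with the correct strides and channel counts):

import tensorflow as tf

slim = tf.contrib.slim

def depthwise_separable_block(net, out_channels, stride=1, scope='sep_block'):
    """One MobileNet-style unit: 3x3 depthwise conv + 1x1 pointwise conv."""
    with tf.variable_scope(scope):
        # num_outputs=None makes separable_conv2d perform only the depthwise step
        net = slim.separable_conv2d(net, None, [3, 3],
                                    depth_multiplier=1,
                                    stride=stride,
                                    normalizer_fn=slim.batch_norm,
                                    scope='depthwise')
        # 1x1 pointwise convolution mixes information across channels
        net = slim.conv2d(net, out_channels, [1, 1],
                          stride=1,
                          normalizer_fn=slim.batch_norm,
                          scope='pointwise')
    return net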
