TensorFlow Basics Notes (13): Training and testing MNIST with MobileNet
The implementation consists of four files: mnist_train.py (the training loop), mnist_eval.py (checkpoint evaluation), mnist_inference.py (forward-pass definitions), and mobilenet_v1.py (the slim MobileNet v1 network).
mnist_train.py
#coding: utf-8
import os

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

import mnist_inference

BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 10000
MOVING_AVERAGE_DECAY = 0.99
MODEL_SAVE_PATH = "./mobilenet_v1_model/"
MODEL_NAME = "model.ckpt"
channels = 1


def train_MLP(mnist):
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = mnist_inference.inference_MLP(x, regularizer)
    global_step = tf.Variable(0, trainable=False)

    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())

    # the labels are one-hot, so convert them back to class indices for the sparse loss
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))

    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE, global_step,
        mnist.train.num_examples / BATCH_SIZE, LEARNING_RATE_DECAY)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            if i % 1000 == 0:
                print("After %d training step(s), loss on training batch is %g." % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)


def train_mobilenet(mnist):
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)

    # MobileNet expects image-shaped input: reshape the flat 784-vector to
    # 28x28x1 and zero-pad it to 112x112
    x_image = tf.reshape(x, [-1, 28, 28, 1])
    x_image = tf.image.resize_image_with_crop_or_pad(x_image, 28 * 4, 28 * 4)
    y = mnist_inference.inference_mobilenet(x_image, regularizer)
    global_step = tf.Variable(0, trainable=False)

    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    # no weight regularization is collected for the mobilenet branch
    loss = cross_entropy_mean  # + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE, global_step,
        mnist.train.num_examples / BATCH_SIZE, LEARNING_RATE_DECAY)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    # batch norm keeps its moving-statistics updates in UPDATE_OPS; they must
    # run together with the train step, otherwise evaluation with
    # is_training=False would see untrained statistics
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies([train_step, variable_averages_op] + update_ops):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            if i % 1000 == 0:
                print("After %d training step(s), loss on training batch is %g." % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)
            else:
                print("After %d training step(s), loss on training batch is %g." % (step, loss_value))


def main(argv=None):
    # tf.train.Saver does not create the save directory by itself
    if not os.path.exists(MODEL_SAVE_PATH):
        os.makedirs(MODEL_SAVE_PATH)
    mnist = input_data.read_data_sets("../MNIST_data", one_hot=True)
    train_mobilenet(mnist)


if __name__ == '__main__':
    tf.app.run()
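A note on the reshape-and-pad step in train_mobilenet: MobileNet v1 downsamples its input by a total stride of 32 (five stride-2 layers), so a raw 28x28 input would collapse to a 1x1 feature map by the final layers; zero-padding the digit onto a 112x112 canvas (a standard MobileNet input size) leaves a 4x4 map before the global average pool. A minimal standalone sketch (illustrative values only) of what tf.image.resize_image_with_crop_or_pad does here:

import numpy as np
import tensorflow as tf

flat = tf.placeholder(tf.float32, [None, 784])
img = tf.reshape(flat, [-1, 28, 28, 1])
padded = tf.image.resize_image_with_crop_or_pad(img, 112, 112)

with tf.Session() as sess:
    out = sess.run(padded, feed_dict={flat: np.ones((1, 784), np.float32)})
    print(out.shape)          # (1, 112, 112, 1)
    print(out[0, 0, 0, 0])    # 0.0  -- the border is zero padding
    print(out[0, 56, 56, 0])  # 1.0  -- the original pixels sit in the centre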
mnist_eval.py
#coding: utf-8
import time

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

import mnist_inference
import mnist_train

# reload the newest model every EVAL_INTERVAL_SECS seconds (the loop is
# disabled below, so evaluation runs once)
EVAL_INTERVAL_SECS = 10


def evaluate_MLP(mnist):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
        validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}

        y = mnist_inference.inference_MLP(x, None)
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # restore the moving-average (shadow) values in place of the raw weights
        variable_averages = tf.train.ExponentialMovingAverage(mnist_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # while True:
        if 1:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    # load the model
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                    print("After %s training steps, validation accuracy = %g" % (global_step, accuracy_score))
                else:
                    print('No checkpoint file found')
                    return
            # time.sleep(EVAL_INTERVAL_SECS)


def evaluate_mobilenet(mnist):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')

        # MobileNet expects image-shaped input: reshape and zero-pad to
        # 112x112, exactly as in training
        x_image = tf.reshape(x, [-1, 28, 28, 1])
        x_image = tf.image.resize_image_with_crop_or_pad(x_image, 28 * 4, 28 * 4)
        # is_training=False: use the learned batch-norm statistics and
        # disable dropout at evaluation time
        y = mnist_inference.inference_mobilenet(x_image, None, is_training=False)
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        variable_averages = tf.train.ExponentialMovingAverage(mnist_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # evaluate in batches: feeding all validation images at once would be
        # expensive at 112x112 resolution
        images = mnist.validation.images
        labels = mnist.validation.labels
        batch_size = 100
        TEST_STEPS = images.shape[0] // batch_size
        sum_accuracy = 0.0

        # while True:
        if 1:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    # load the model
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    for i in range(int(TEST_STEPS)):
                        input_batch = images[i * batch_size:(i + 1) * batch_size, :]
                        label_batch = labels[i * batch_size:(i + 1) * batch_size, :]
                        validate_feed = {x: input_batch, y_: label_batch}
                        accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                        sum_accuracy += accuracy_score
                        print("test %s batch steps, validation accuracy = %g" % (i, accuracy_score))
                else:
                    print('No checkpoint file found')
                    return
            # time.sleep(EVAL_INTERVAL_SECS)
        print("After %s training steps, all validation accuracy = %g" % (global_step, sum_accuracy / TEST_STEPS))


def main(argv=None):
    mnist = input_data.read_data_sets("../MNIST_data", one_hot=True)
    evaluate_mobilenet(mnist)


if __name__ == '__main__':
    tf.app.run()
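The variables_to_restore() call above is what makes evaluation use the smoothed weights: it returns a name map from the shadow variables stored in the checkpoint to the live variables of the eval graph, so Saver.restore loads the averaged values. A tiny sketch (hypothetical variable name) of the mapping it produces:

import tensorflow as tf

w = tf.Variable(0.0, name='w')
ema = tf.train.ExponentialMovingAverage(0.99)
print(ema.variables_to_restore())
# roughly: {'w/ExponentialMovingAverage': <tf.Variable 'w:0' ...>}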
mnist_inference.py
#coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

import mobilenet_v1

slim = tf.contrib.slim

# network dimensions
INPUT_NODE = 784
OUTPUT_NODE = 10
LAYER1_NODE = 500


def get_weight_variable(shape, regularizer):
    weights = tf.get_variable("weights", shape,
                              initializer=tf.truncated_normal_initializer(stddev=0.1))
    if regularizer is not None:
        tf.add_to_collection('losses', regularizer(weights))
    return weights


# forward pass of a simple one-hidden-layer MLP
def inference_MLP(input_tensor, regularizer):
    with tf.variable_scope('layer1'):
        weights = get_weight_variable([INPUT_NODE, LAYER1_NODE], regularizer)
        biases = tf.get_variable("biases", [LAYER1_NODE], initializer=tf.constant_initializer(0.0))
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights) + biases)
    with tf.variable_scope('layer2'):
        weights = get_weight_variable([LAYER1_NODE, OUTPUT_NODE], regularizer)
        biases = tf.get_variable("biases", [OUTPUT_NODE], initializer=tf.constant_initializer(0.0))
        layer2 = tf.matmul(layer1, weights) + biases
    return layer2


# forward pass of MobileNet v1; the regularizer argument is kept for
# interface symmetry with inference_MLP but is not used here
def inference_mobilenet(input_tensor, regularizer, is_training=True):
    with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                        normalizer_fn=slim.batch_norm):
        logits, end_points = mobilenet_v1.mobilenet_v1(
            input_tensor,
            num_classes=OUTPUT_NODE,
            dropout_keep_prob=0.8,
            is_training=is_training,
            min_depth=8,
            depth_multiplier=1.0,
            conv_defs=None,
            prediction_fn=tf.contrib.layers.softmax,
            spatial_squeeze=True,
            reuse=None,
            scope='MobilenetV1',
            global_pool=False)
    return logits
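mobilenet_v1.mobilenet_v1 builds the whole network out of depthwise-separable blocks: a 3x3 depthwise convolution (one filter per input channel) followed by a 1x1 pointwise convolution that mixes channels, which is where MobileNet's parameter savings come from. A minimal sketch of one such block in slim (illustrative only; the real layer definitions live in mobilenet_v1.py and the scope names here are made up):

import tensorflow as tf
slim = tf.contrib.slim

def separable_block(net, out_channels, stride):
    # depthwise 3x3: num_outputs=None tells separable_conv2d to skip its
    # built-in pointwise stage, leaving the depthwise convolution alone
    net = slim.separable_conv2d(net, None, [3, 3],
                                depth_multiplier=1, stride=stride,
                                normalizer_fn=slim.batch_norm,
                                scope='depthwise')
    # pointwise 1x1: recombine the channels
    net = slim.conv2d(net, out_channels, [1, 1], stride=1,
                      normalizer_fn=slim.batch_norm,
                      scope='pointwise')
    return net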
mobilenet_v1.py
Download it from:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.py
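Save it alongside the three files above so that `import mobilenet_v1` in mnist_inference.py resolves. A quick smoke test (a sketch, assuming the file downloaded correctly) to confirm the graph builds:

import tensorflow as tf
import mobilenet_v1

inputs = tf.random_uniform((1, 112, 112, 1))
logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes=10)
print(logits.shape)  # expected: (1, 10)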