Learning VGGNet: Practice
0 - DataSet
http://www.csc.kth.se/~att/Site/Animals.html
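The images are expected to unpack into one folder per animal class under DATA_PATH, each with an original/ subfolder of JPEGs; the reader in section 1.3 derives the labels from these folder names, and OUTPUT_C = 19 below assumes 19 classes. A quick sanity check of the layout (a minimal sketch, assuming the archive was unpacked into animal_database/):

import os

DATA_PATH = "animal_database/"
classes = sorted(d for d in os.listdir(DATA_PATH)
                 if os.path.isdir(os.path.join(DATA_PATH, d)))
print(len(classes), classes)   # expect 19 class folders
for c in classes[:3]:
    jpgs = os.listdir(os.path.join(DATA_PATH, c, "original"))
    print(c, len(jpgs), "images")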
1 - Code
1.1 - Import Packages
import tensorflow as tf
import os, glob
import numpy as np
from skimage import io, transform
1.2 - Initialize Parameters
DATA_PATH = "animal_database/"   # root folder of the dataset from section 0
INPUT_W = 224                    # input width expected by VGG-16
INPUT_H = 224                    # input height
INPUT_C = 3                      # RGB channels
OUTPUT_C = 19                    # number of animal classes
TRAINING_STEPS = 50              # training iterations (small, for demonstration)
MODEL_SAVE_PATH = "model"
MODEL_NAME = "model.ckpt"
BATCH_SIZE = 64
LEARNING_RATE_BASE = 1e-6        # initial learning rate
LEARNING_RATE_DECAY = 0.99       # exponential decay rate per epoch
MOMENTUM = 0.9                   # momentum for MomentumOptimizer
TRAIN_KEEP_PROB = 0.6            # dropout keep probability during training
VAL_KEEP_PROB = 1.0              # no dropout at validation
TEST_KEEP_PROB = 1.0             # no dropout at test
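With these settings, tf.train.exponential_decay (used in section 1.5) multiplies the base rate by 0.99 for every epoch's worth of steps. A small illustration of the schedule, using a hypothetical decay_steps of 100 (in training it is train_size / BATCH_SIZE):

# decayed_lr = LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (global_step / decay_steps)
decay_steps = 100  # hypothetical
for step in (0, 100, 1000, 5000):
    print(step, 1e-6 * 0.99 ** (step / decay_steps))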
1.3 - Build Data Reader
class DCdataset(object):
    def __init__(self, path, w, h, c, ratio=0.8):
        def onehot(n):
            l = np.zeros([OUTPUT_C])
            l[n] = 1
            return l

        print("Process images start")
        # each subfolder of path is one class; its index becomes the label
        cate = [path + x for x in os.listdir(path) if os.path.isdir(path + x)]
        x = []
        y = []
        for (i, folder) in enumerate(cate):
            for img_path in glob.glob(folder + "/original/*.jpg"):
                # print("reading the image: %s" % img_path)
                img = io.imread(img_path)
                img = transform.resize(img, (w, h, c))
                x.append(img)
                y.append(i)
        x = np.asarray(x, np.float32)
        y = np.asarray(y, np.int32)

        # shuffle, one-hot encode, then split into train/validation by ratio
        num_example = x.shape[0]
        arr = np.arange(num_example)
        np.random.shuffle(arr)
        x = x[arr]
        y = y[arr]
        x = np.asarray([np.reshape(x_, (w, h, c)) for x_ in x])
        y = np.asarray([onehot(y_) for y_ in y])
        s = int(num_example * ratio)  # int() instead of the removed np.int alias
        self.x_train, self.x_val = x[:s], x[s:]
        self.y_train, self.y_val = y[:s], y[s:]
        self.train_size = s
        self.val_size = num_example - s
        print("Process images end")

    def next_batch(self, batch_size):
        # sample a random training batch without replacement
        arr = np.arange(self.train_size)
        np.random.shuffle(arr)
        arr = arr[:batch_size]
        batch_x = self.x_train[arr]
        batch_y = self.y_train[arr]
        return batch_x, batch_y

    def next_val_batch(self, batch_size):
        arr = np.arange(self.val_size)
        np.random.shuffle(arr)
        arr = arr[:batch_size]
        batch_x = self.x_val[arr]
        batch_y = self.y_val[arr]
        return batch_x, batch_y
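A quick way to confirm the reader produces correctly shaped batches (a minimal sketch; it loads the entire dataset into memory, so expect it to take a while):

dataset = DCdataset(DATA_PATH, INPUT_W, INPUT_H, INPUT_C)
bx, by = dataset.next_batch(4)
print(bx.shape, by.shape)  # expect (4, 224, 224, 3) and (4, 19)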
1.4 - Build Network
def conv_op(input_op, name, kh, kw, n_out, dh, dw, p):
    n_in = input_op.get_shape()[-1].value
    with tf.name_scope(name) as scope:
        kernel = tf.get_variable(scope+"w",
                                 shape=[kh, kw, n_in, n_out], dtype=tf.float32,
                                 # initializer=tf.truncated_normal_initializer(mean=0, stddev=10e-2))
                                 initializer=tf.contrib.layers.xavier_initializer_conv2d())
        conv = tf.nn.conv2d(input_op, kernel, (1, dh, dw, 1), padding="SAME")
        bias_init_val = tf.constant(0.0, shape=[n_out], dtype=tf.float32)
        biases = tf.Variable(bias_init_val, trainable=True, name="b")
        z = tf.nn.bias_add(conv, biases)
        activation = tf.nn.relu(z, name=scope)
        p += [kernel, biases]
        return activation
def fc_op(input_op, name, n_out, p):
    n_in = input_op.get_shape()[-1].value
    with tf.name_scope(name) as scope:
        kernel = tf.get_variable(scope+"w",
                                 shape=[n_in, n_out], dtype=tf.float32,
                                 initializer=tf.contrib.layers.xavier_initializer())
        biases = tf.Variable(tf.constant(0.1, shape=[n_out],
                                         dtype=tf.float32), name="b")
        # relu_layer computes relu(matmul(input_op, kernel) + biases) in one op
        activation = tf.nn.relu_layer(input_op, kernel, biases, name=scope)
        p += [kernel, biases]
        return activation
def mpool_op(input_op, name, kh, kw, dh, dw):
return tf.nn.max_pool(input_op,
ksize=[1, kh, kw, 1],
strides=[1, dh, dw, 1],
padding="SAME",
name=name)
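The network below follows VGG-16's design of stacking 3x3 convolutions: two stacked 3x3 layers cover the same 5x5 receptive field as a single 5x5 layer, but with fewer weights and an extra non-linearity in between. A quick parameter comparison for C input and output channels:

C = 64
print(2 * (3 * 3 * C * C))  # two stacked 3x3 conv layers: 73728 weights
print(5 * 5 * C * C)        # one 5x5 conv layer: 102400 weights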
def inference_op(input_op, keep_prob):
    p = []

    conv1_1 = conv_op(input_op, name="conv1_1", kh=3, kw=3, n_out=64, dh=1, dw=1, p=p)
    conv1_2 = conv_op(conv1_1, name="conv1_2", kh=3, kw=3, n_out=64, dh=1, dw=1, p=p)
    pool1 = mpool_op(conv1_2, name="pool1", kh=2, kw=2, dh=2, dw=2)

    conv2_1 = conv_op(pool1, name="conv2_1", kh=3, kw=3, n_out=128, dh=1, dw=1, p=p)
    conv2_2 = conv_op(conv2_1, name="conv2_2", kh=3, kw=3, n_out=128, dh=1, dw=1, p=p)
    pool2 = mpool_op(conv2_2, name="pool2", kh=2, kw=2, dh=2, dw=2)

    conv3_1 = conv_op(pool2, name="conv3_1", kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)
    conv3_2 = conv_op(conv3_1, name="conv3_2", kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)
    conv3_3 = conv_op(conv3_2, name="conv3_3", kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)
    pool3 = mpool_op(conv3_3, name="pool3", kh=2, kw=2, dh=2, dw=2)

    conv4_1 = conv_op(pool3, name="conv4_1", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    conv4_2 = conv_op(conv4_1, name="conv4_2", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    conv4_3 = conv_op(conv4_2, name="conv4_3", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    pool4 = mpool_op(conv4_3, name="pool4", kh=2, kw=2, dh=2, dw=2)

    conv5_1 = conv_op(pool4, name="conv5_1", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    conv5_2 = conv_op(conv5_1, name="conv5_2", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    conv5_3 = conv_op(conv5_2, name="conv5_3", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    pool5 = mpool_op(conv5_3, name="pool5", kh=2, kw=2, dh=2, dw=2)

    # flatten pool5 (7x7x512 for a 224x224 input) into a vector for the fc layers
    shp = pool5.get_shape()
    flattened_shape = shp[1].value * shp[2].value * shp[3].value
    resh1 = tf.reshape(pool5, [-1, flattened_shape], name="resh1")

    fc6 = fc_op(resh1, name="fc6", n_out=4096, p=p)
    fc6_drop = tf.nn.dropout(fc6, keep_prob, name="fc6_drop")

    fc7 = fc_op(fc6_drop, name="fc7", n_out=4096, p=p)
    fc7_drop = tf.nn.dropout(fc7, keep_prob, name="fc7_drop")

    # note: fc_op applies ReLU, so the fc8 logits are clipped at zero;
    # a linear final layer would be more standard for classification
    fc8 = fc_op(fc7_drop, name="fc8", n_out=OUTPUT_C, p=p)
    # softmax = tf.nn.softmax(fc8)
    # predictions = tf.argmax(softmax, 1)
    return fc8, p
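Each of the five 2x2/stride-2 pools halves the spatial resolution, so a 224x224 input reaches pool5 at 7x7x512, i.e. 25088 flattened features feeding fc6:

size = 224
for _ in range(5):   # five max-pool stages
    size //= 2
print(size, size * size * 512)  # 7, 25088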
1.5 - Train
def train():
    x = tf.placeholder(tf.float32, [None, INPUT_W, INPUT_H, INPUT_C], name="x-input")
    y_ = tf.placeholder(tf.float32, [None, OUTPUT_C], name="y-input")
    keep_prob = tf.placeholder(tf.float32, name="keep_prob")

    dataset = DCdataset(DATA_PATH, INPUT_W, INPUT_H, INPUT_C)
    global_step = tf.Variable(0, trainable=False)

    y, p = inference_op(x, keep_prob)
    # labels are one-hot, so argmax recovers the class index the sparse loss expects
    loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1)))
    accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(tf.nn.softmax(y), 1), tf.argmax(y_, 1)), tf.float32))
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        dataset.train_size / BATCH_SIZE,  # decay once per epoch
        LEARNING_RATE_DECAY
    )
    optimizer = tf.train.MomentumOptimizer(learning_rate, MOMENTUM).minimize(loss, global_step=global_step)
    # tf.reset_default_graph()

    with tf.Session() as sess:
        tf.global_variables_initializer().run()  # initialize_all_variables is deprecated
        saver = tf.train.Saver()
        for i in range(TRAINING_STEPS):
            xs, ys = dataset.next_batch(BATCH_SIZE)
            _, loss_value, accuracy_value, step = sess.run([optimizer, loss, accuracy, global_step],
                                                           feed_dict={x: xs, y_: ys, keep_prob: TRAIN_KEEP_PROB})
            print("After %d training step(s), loss on training batch is %g, accuracy on training batch is %g%%." % (step, loss_value, accuracy_value*100))
            if i % 2 == 0:
                xs, ys = dataset.next_val_batch(BATCH_SIZE)
                # evaluate only; running the optimizer here would train on validation data
                loss_value, accuracy_value, step = sess.run([loss, accuracy, global_step],
                                                            feed_dict={x: xs, y_: ys, keep_prob: VAL_KEEP_PROB})
                print("[Validation] Step %d: Validation loss is %g and Validation accuracy is %g%%." % (step, loss_value, accuracy_value*100))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)

train()
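Since the labels are already one-hot, the one-hot-then-argmax round trip in the loss can be avoided. A sketch of the equivalent dense-label form (a standard TF 1.x call, not what the original code uses):

# equivalent loss computed directly on the one-hot labels
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))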
1.6 - Test
def test(img_path, model_path):
    with tf.Session() as sess:
        # restore the graph and weights from the checkpoint saved during training
        saver = tf.train.import_meta_graph(model_path+".meta")
        saver.restore(sess, model_path)
        graph = tf.get_default_graph()

        x = graph.get_tensor_by_name("x-input:0")
        keep_prob = graph.get_tensor_by_name("keep_prob:0")
        fc8 = graph.get_tensor_by_name("fc8:0")

        img = io.imread(img_path)
        img = transform.resize(img, (INPUT_W, INPUT_H, INPUT_C))
        y = sess.run(fc8, feed_dict={
            x: np.reshape(img, [-1, INPUT_W, INPUT_H, INPUT_C]),
            keep_prob: TEST_KEEP_PROB
        })
        softmax = tf.nn.softmax(y)
        prediction_labels = tf.argmax(softmax, 1)
        print("label: ", sess.run(prediction_labels))
        print("probabilities: ", sess.run(softmax))

img_path = os.path.join(DATA_PATH, "cougar", "original", "4400.jpg")
model_path = os.path.join(MODEL_SAVE_PATH, MODEL_NAME+"-2")
test(img_path, model_path)
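The "-2" suffix above is the global step at which that checkpoint was saved. If the exact step is not known, TensorFlow can locate the newest checkpoint in the save directory:

ckpt = tf.train.latest_checkpoint(MODEL_SAVE_PATH)  # e.g. "model/model.ckpt-50"
test(img_path, ckpt)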