Program without dropout, with the loss curve viewed in TensorBoard

"""
Please note, this code is only for python 3+. If you are using python 2+, please modify the code accordingly.
"""
from __future__ import print_function
import tensorflow as tf
from sklearn.datasets import load_digits
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import LabelBinarizer # load data
digits = load_digits()
X = digits.data
y = digits.target
y = LabelBinarizer().fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.3) def add_layer(inputs, in_size, out_size, layer_name, activation_function=None, ):
# add one more layer and return the output of this layer
Weights = tf.Variable(tf.random_normal([in_size, out_size]))
biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, )
Wx_plus_b = tf.matmul(inputs, Weights) + biases
# here to dropout if activation_function is None:
outputs = Wx_plus_b
else:
outputs = activation_function(Wx_plus_b, )
tf.summary.histogram(layer_name + '/outputs', outputs)
return outputs # define placeholder for inputs to network xs = tf.placeholder(tf.float32, [None, 64]) # 8x8
ys = tf.placeholder(tf.float32, [None, 10]) # add output layer
l1 = add_layer(xs, 64, 50, 'l1', activation_function=tf.nn.tanh)
prediction = add_layer(l1, 50, 10, 'l2', activation_function=tf.nn.softmax) # the loss between prediction and real data
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction),
reduction_indices=[1])) # loss
tf.summary.scalar('loss', cross_entropy)
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy) sess = tf.Session()
merged = tf.summary.merge_all()
# summary writer goes in here
train_writer = tf.summary.FileWriter("logs/train", sess.graph)
test_writer = tf.summary.FileWriter("logs/test", sess.graph) # tf.initialize_all_variables() no long valid from
# 2017-03-02 if using tensorflow >= 0.12
if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1:
init = tf.initialize_all_variables()
else:
init = tf.global_variables_initializer()
sess.run(init) for i in range(500):
# here to determine the keeping probability
sess.run(train_step, feed_dict={xs: X_train, ys: y_train})
if i % 50 == 0:
# record loss
train_result = sess.run(merged, feed_dict={xs: X_train, ys: y_train})
test_result = sess.run(merged, feed_dict={xs: X_test, ys: y_test})
train_writer.add_summary(train_result, i)
test_writer.add_summary(test_result, i)
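
To make the loss line concrete, here is the same cross-entropy worked out in plain Python for one hypothetical sample (the numbers are made up for illustration):

import math

y_row = [0, 0, 1]        # one-hot label, as produced by LabelBinarizer
p_row = [0.1, 0.2, 0.7]  # softmax output for the same sample
loss = -sum(yi * math.log(pi) for yi, pi in zip(y_row, p_row))
print(loss)              # -log(0.7) ≈ 0.357

tf.reduce_sum(..., reduction_indices=[1]) computes exactly this per-sample sum, and tf.reduce_mean then averages it over the batch.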

After the script finishes, a logs directory containing the corresponding TensorBoard event files is created under the working directory.
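
The layout should look roughly as follows; the exact event-file names encode a timestamp and hostname, so they will differ on your machine:

logs/
    train/
        events.out.tfevents.<timestamp>.<hostname>
    test/
        events.out.tfevents.<timestamp>.<hostname>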

Run tensorboard --logdir="logs/" --port=8011 and the dashboard can be opened in a browser.
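
Both scripts target the TensorFlow 1.x graph API (tf.placeholder, tf.Session, tf.train.GradientDescentOptimizer). If only TensorFlow 2.x is installed, a minimal sketch of keeping them runnable through the compatibility shim that 2.x ships (an assumption about your environment, not part of the original scripts):

# at the top of either script, instead of `import tensorflow as tf`:
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # disables eager execution so placeholders and Session work

The rest of the code can then stay unchanged, since tf.compat.v1 re-exposes the 1.x names used above.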

Program with dropout, with the loss curve viewed in TensorBoard

"""
Please note, this code is only for python 3+. If you are using python 2+, please modify the code accordingly.
"""
from __future__ import print_function
import tensorflow as tf
from sklearn.datasets import load_digits
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import LabelBinarizer # load data
digits = load_digits()
X = digits.data
y = digits.target
y = LabelBinarizer().fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.3) def add_layer(inputs, in_size, out_size, layer_name, activation_function=None, ):
# add one more layer and return the output of this layer
Weights = tf.Variable(tf.random_normal([in_size, out_size]))
biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, )
Wx_plus_b = tf.matmul(inputs, Weights) + biases
# here to dropout
Wx_plus_b = tf.nn.dropout(Wx_plus_b, keep_prob)
if activation_function is None:
outputs = Wx_plus_b
else:
outputs = activation_function(Wx_plus_b, )
tf.summary.histogram(layer_name + '/outputs', outputs)
return outputs # define placeholder for inputs to network
keep_prob = tf.placeholder(tf.float32) #dropout
xs = tf.placeholder(tf.float32, [None, 64]) # 8x8
ys = tf.placeholder(tf.float32, [None, 10]) # add output layer
l1 = add_layer(xs, 64, 50, 'l1', activation_function=tf.nn.tanh)
prediction = add_layer(l1, 50, 10, 'l2', activation_function=tf.nn.softmax) # the loss between prediction and real data
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction),
reduction_indices=[1])) # loss
tf.summary.scalar('loss', cross_entropy)
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy) sess = tf.Session()
merged = tf.summary.merge_all()
# summary writer goes in here
train_writer = tf.summary.FileWriter("logs/train", sess.graph)
test_writer = tf.summary.FileWriter("logs/test", sess.graph) # tf.initialize_all_variables() no long valid from
# 2017-03-02 if using tensorflow >= 0.12
if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1:
init = tf.initialize_all_variables()
else:
init = tf.global_variables_initializer()
sess.run(init) for i in range(500):
# here to determine the keeping probability
sess.run(train_step, feed_dict={xs: X_train, ys: y_train, keep_prob: 0.5})
if i % 50 == 0:
# record loss
train_result = sess.run(merged, feed_dict={xs: X_train, ys: y_train, keep_prob: 1})
test_result = sess.run(merged, feed_dict={xs: X_test, ys: y_test, keep_prob: 1})
train_writer.add_summary(train_result, i)
test_writer.add_summary(test_result, i)
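
What tf.nn.dropout does to Wx_plus_b during training can be illustrated with a plain-NumPy sketch of inverted dropout (illustrative only, not the actual TF kernel):

import numpy as np

def inverted_dropout(x, keep_prob, rng):
    # keep each unit with probability keep_prob, zero out the rest,
    # and scale survivors by 1/keep_prob so the expected activation is unchanged
    mask = rng.random(x.shape) < keep_prob
    return np.where(mask, x / keep_prob, 0.0)

rng = np.random.default_rng(0)
x = np.ones((1, 8))
print(inverted_dropout(x, 0.5, rng))  # roughly half zeros, survivors equal to 2.0

The 1/keep_prob scaling is why the loop above can simply feed keep_prob: 1 when recording summaries: with keep_prob = 1 every unit is kept, the layer becomes a no-op, and no separate rescaling is needed at evaluation time.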

[Screenshot: TensorBoard loss curves for the train and test runs]
