import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data

# Load the MNIST data set from a local directory, with one-hot encoded labels.
mnist = input_data.read_data_sets("D:\\F\\TensorFlow_deep_learn\\MNIST\\", one_hot=True)

# Single-layer softmax regression: 784 input pixels -> 10 class probabilities.
x_data = tf.placeholder("float32", [None, 784])
weight = tf.Variable(tf.ones([784, 10]))
bias = tf.Variable(tf.ones([10]))
y_model = tf.nn.softmax(tf.matmul(x_data, weight) + bias)
y_data = tf.placeholder("float32", [None, 10])

# Squared-error loss between the softmax output and the one-hot labels.
loss = tf.reduce_sum(tf.pow((y_model - y_data), 2))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(loss)

init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x_data: batch_xs, y_data: batch_ys})
    if i % 50 == 0:
        # Report test-set accuracy every 50 training steps.
        correct_prediction = tf.equal(tf.argmax(y_model, 1), tf.argmax(y_data, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        print(sess.run(accuracy, feed_dict={x_data: mnist.test.images, y_data: mnist.test.labels}))
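
The tensorflow.examples.tutorials.mnist loader and the tf.placeholder/tf.Session API used throughout these examples exist only in TensorFlow 1.x. As a rough sketch for readers on TensorFlow 2.x (my assumption, using tf.keras.datasets.mnist as a stand-in for the removed tutorial loader), the same graph-style code can run through the compatibility shim:

import numpy as np
import tensorflow as tf

tf.compat.v1.disable_eager_execution()  # run in TF1-style graph mode

# Stand-in for input_data.read_data_sets: load MNIST via Keras and one-hot encode the labels.
(train_x, train_y), (test_x, test_y) = tf.keras.datasets.mnist.load_data()
train_x = train_x.reshape(-1, 784).astype("float32") / 255.0
train_y = np.eye(10, dtype="float32")[train_y]

x_data = tf.compat.v1.placeholder("float32", [None, 784])
y_data = tf.compat.v1.placeholder("float32", [None, 10])
# ...the rest of the model is built as above, with tf.compat.v1.Session(),
# tf.compat.v1.global_variables_initializer() and tf.compat.v1.train.GradientDescentOptimizer.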

import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data

mnist = input_data.read_data_sets("D:\\F\\TensorFlow_deep_learn\\MNIST\\", one_hot=True)

# Same single-layer model, but with a ReLU output and a cross-entropy loss.
x_data = tf.placeholder("float32", [None, 784])
weight = tf.Variable(tf.ones([784, 10]))
bias = tf.Variable(tf.ones([10]))
y_model = tf.nn.relu(tf.matmul(x_data, weight) + bias)
y_data = tf.placeholder("float32", [None, 10])

# Note: tf.log of a ReLU output can hit log(0) and give NaN gradients; a softmax
# output (or a small epsilon inside the log) keeps this loss well defined.
loss = -tf.reduce_sum(y_data * tf.log(y_model))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(loss)

init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(50)
    sess.run(train_step, feed_dict={x_data: batch_xs, y_data: batch_ys})
    if i % 50 == 0:
        correct_prediction = tf.equal(tf.argmax(y_model, 1), tf.argmax(y_data, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        print(sess.run(accuracy, feed_dict={x_data: mnist.test.images, y_data: mnist.test.labels}))
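
Taking tf.log of the model output directly is fragile: the ReLU above (and even a saturated softmax) can produce exact zeros, which makes the loss NaN. A minimal sketch of the usual TensorFlow 1.x workaround (my substitution, not part of the original post) keeps the raw logits and lets the fused op handle the softmax and the log together:

# Build logits without an output activation, then use the numerically stable fused op
# instead of -sum(y * log(activation(...))).
logits = tf.matmul(x_data, weight) + bias
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_data, logits=logits))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(loss)

# argmax over the logits gives the same predictions as argmax over the softmax output.
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y_data, 1))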

import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data

mnist = input_data.read_data_sets("D:\\F\\TensorFlow_deep_learn\\MNIST\\", one_hot=True)

# Two-layer network: 784 -> 256 -> 10. Note that the hidden layer has no
# activation function, so the two layers still form a single linear map.
x_data = tf.placeholder("float32", [None, 784])
weight1 = tf.Variable(tf.ones([784, 256]))
bias1 = tf.Variable(tf.ones([256]))
y1_model1 = tf.matmul(x_data, weight1) + bias1
weight2 = tf.Variable(tf.ones([256, 10]))
bias2 = tf.Variable(tf.ones([10]))
y_model = tf.nn.softmax(tf.matmul(y1_model1, weight2) + bias2)
y_data = tf.placeholder("float32", [None, 10])

# Cross-entropy loss against the one-hot labels.
loss = -tf.reduce_sum(y_data * tf.log(y_model))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(loss)

init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(50)
    sess.run(train_step, feed_dict={x_data: batch_xs, y_data: batch_ys})
    if i % 50 == 0:
        correct_prediction = tf.equal(tf.argmax(y_model, 1), tf.argmax(y_data, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        print(sess.run(accuracy, feed_dict={x_data: mnist.test.images, y_data: mnist.test.labels}))
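
As written, this two-layer network applies no activation function to the hidden layer, so the two weight matrices collapse into one linear transformation before the softmax. A short sketch of the common variant (my addition, not in the original post) inserts a ReLU so the extra layer actually adds capacity:

# Nonlinear hidden layer: without tf.nn.relu the 784->256->10 stack is still a single linear map.
y1_model1 = tf.nn.relu(tf.matmul(x_data, weight1) + bias1)
y_model = tf.nn.softmax(tf.matmul(y1_model1, weight2) + bias2)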

import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data
mnist = input_data.read_data_sets("D:\\F\\TensorFlow_deep_learn\\MNIST\\", one_hot=True)

# Reshape the flat 784-pixel vectors into 28x28x1 images for the convolution.
x_data = tf.placeholder("float32", [None, 784])
x_image = tf.reshape(x_data, [-1, 28, 28, 1])

# One 5x5 convolution (32 filters, SAME padding) followed by 2x2 max pooling.
w_conv = tf.Variable(tf.ones([5, 5, 1, 32]))
b_conv = tf.Variable(tf.ones([32]))
h_conv = tf.nn.relu(tf.nn.conv2d(x_image, w_conv, strides=[1, 1, 1, 1], padding='SAME') + b_conv)
h_pool = tf.nn.max_pool(h_conv, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

# Fully connected layer on the flattened 14x14x32 feature map, then a softmax output.
w_fc = tf.Variable(tf.ones([14 * 14 * 32, 1024]))
b_fc = tf.Variable(tf.ones([1024]))
h_pool_flat = tf.reshape(h_pool, [-1, 14 * 14 * 32])
h_fc = tf.nn.relu(tf.matmul(h_pool_flat, w_fc) + b_fc)
W_fc2 = tf.Variable(tf.ones([1024, 10]))
b_fc2 = tf.Variable(tf.ones([10]))
y_model = tf.nn.softmax(tf.matmul(h_fc, W_fc2) + b_fc2)
y_data = tf.placeholder("float32", [None, 10])
loss = -tf.reduce_sum(y_data * tf.log(y_model))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(loss)

init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(200)
    sess.run(train_step, feed_dict={x_data: batch_xs, y_data: batch_ys})
    if i % 50 == 0:
        correct_prediction = tf.equal(tf.argmax(y_model, 1), tf.argmax(y_data, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        print(sess.run(accuracy, feed_dict={x_data: mnist.test.images, y_data: mnist.test.labels}))
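
The 14*14*32 size of the fully connected layer follows from the padding arithmetic: a 5x5 convolution with stride 1 and SAME padding keeps the 28x28 spatial size, and the 2x2 max pool with stride 2 halves it to 14x14 over 32 channels. If in doubt, the static shapes can be checked while building the graph, for example:

# Sanity-check the shapes produced by the layers above (expected values shown).
print(h_conv.get_shape())       # (?, 28, 28, 32)
print(h_pool.get_shape())       # (?, 14, 14, 32)
print(h_pool_flat.get_shape())  # (?, 6272), i.e. 14 * 14 * 32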

import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data
mnist = input_data.read_data_sets("D:\\F\\TensorFlow_deep_learn\\MNIST\\", one_hot=True)

x_data = tf.placeholder("float", shape=[None, 784])
y_data = tf.placeholder("float", shape=[None, 10])

# Helpers: small random weights and 0.1 biases instead of the all-ones initialization above.
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='VALID')

def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

# First convolutional layer: 28x28x1 -> 24x24x32, pooled to 12x12x32.
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
x_image = tf.reshape(x_data, [-1, 28, 28, 1])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)

# Second convolutional layer: 12x12x32 -> 8x8x64, pooled to 4x4x64.
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)

# Fully connected layer with dropout, then a softmax output over the 10 digit classes.
W_fc1 = weight_variable([4 * 4 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 4 * 4 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
keep_prob = tf.placeholder("float")
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
cross_entropy = -tf.reduce_sum(y_data * tf.log(y_conv))
train_step = tf.train.AdamOptimizer(1e-2).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_data, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

sess = tf.Session()
sess.run(tf.initialize_all_variables())
for i in range(1000):
    batch = mnist.train.next_batch(50)
    if i % 5 == 0:
        # Dropout is disabled (keep_prob = 1.0) when measuring accuracy.
        train_accuracy = sess.run(accuracy, feed_dict={x_data: batch[0], y_data: batch[1], keep_prob: 1.0})
        print("step %d, training accuracy %g" % (i, train_accuracy))
    sess.run(train_step, feed_dict={x_data: batch[0], y_data: batch[1], keep_prob: 0.5})
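
The loop above only reports accuracy on the current training batch. To mirror the test-set evaluation used in the earlier examples (a small sketch, assuming that is the intent), the same accuracy op can be run on mnist.test with dropout switched off:

# Evaluate on the held-out test set; keep_prob = 1.0 disables dropout at test time.
print("test accuracy %g" % sess.run(
    accuracy,
    feed_dict={x_data: mnist.test.images, y_data: mnist.test.labels, keep_prob: 1.0}))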
