import os
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image, ImageChops
from skimage import color, data, transform, io

# List every flower-category folder in the dataset directory
fileList = os.listdir("F:\\data\\flowers")
trainDataList = []
trianLabel = []  # (sic: the misspelling is kept, since the saved .npy files below use this name)
testDataList = []
testLabel = []

# Process each flower category in turn
for j in range(len(fileList)):
    data = os.listdir("F:\\data\\flowers\\" + fileList[j])
    # Reserve a quarter of each category's images for the test set
    testNum = int(len(data) * 0.25)
    # Shuffle the category's file list testNum times
    while testNum > 0:
        np.random.shuffle(data)
        testNum -= 1
    # After shuffling, the first three quarters become the training set
    trainData = np.array(data[:-(int(len(data) * 0.25))])
    # ...and the remaining quarter becomes the test set
    testData = np.array(data[-(int(len(data) * 0.25)):])
    # Read each selected training image
    for i in range(len(trainData)):
        # Only JPG files are accepted
        if trainData[i][-3:] == "jpg":
            # Read one JPG image
            image = io.imread("F:\\data\\flowers\\" + fileList[j] + "\\" + trainData[i])
            # Resize the image to 64x64
            image = transform.resize(image, (64, 64))
            # Store the resized image in trainDataList
            trainDataList.append(image)
            # Store its label in trianLabel
            trianLabel.append(int(j))
            # Draw a random angle between 90 degrees clockwise and 90 degrees counter-clockwise
            angle = np.random.randint(-90, 90)
            # Rotate the 64x64 image by that angle
            image = transform.rotate(image, angle)
            # Resize back to 64x64, since rotation can change the image dimensions
            image = transform.resize(image, (64, 64))
            # Store the rotated 64x64 image in trainDataList
            trainDataList.append(image)
            # Store the rotated image's label in trianLabel
            trianLabel.append(int(j))
    # Read each test image for this category
    for i in range(len(testData)):
        # Only JPG files are accepted
        if testData[i][-3:] == "jpg":
            # Read one image
            image = io.imread("F:\\data\\flowers\\" + fileList[j] + "\\" + testData[i])
            # Resize the image to 64x64
            image = transform.resize(image, (64, 64))
            # Store the resized image in testDataList
            testDataList.append(image)
            # Store its label in testLabel
            testLabel.append(int(j))
print("Finished reading the image data...")

# Print the shapes of the training set, test set, and their labels
print(np.shape(trainDataList))
print(np.shape(trianLabel))
print(np.shape(testDataList))
print(np.shape(testLabel))

# Save the training set, test set, and their labels to disk
np.save("G:\\trainDataList", trainDataList)
np.save("G:\\trianLabel", trianLabel)
np.save("G:\\testDataList", testDataList)
np.save("G:\\testLabel", testLabel)
print("Finished preprocessing the data...")

import numpy as np
from keras.utils import to_categorical

# Convert the training and test labels to one-hot encoding
trainLabel = np.load("G:\\trianLabel.npy")
testLabel = np.load("G:\\testLabel.npy")
trainLabel_encoded = to_categorical(trainLabel)
testLabel_encoded = to_categorical(testLabel)
np.save("G:\\trianLabel", trainLabel_encoded)
np.save("G:\\testLabel", testLabel_encoded)
print("Finished one-hot encoding the labels and writing them to disk...")

import random
import numpy as np

trainDataList = np.load("G:\\trainDataList.npy")
trianLabel = np.load("G:\\trianLabel.npy")
print("Finished loading the data...")

# Shuffle the training images and labels with the same index permutation
trainIndex = [i for i in range(len(trianLabel))]
random.shuffle(trainIndex)
trainData = []
trainClass = []
for i in range(len(trainIndex)):
    trainData.append(trainDataList[trainIndex[i]])
    trainClass.append(trianLabel[trainIndex[i]])
print("Finished shuffling the training data...")

np.save("G:\\trainDataList", trainData)
np.save("G:\\trianLabel", trainClass)
print("Finished writing the training data to disk...")

X = np.load("G:\\trainDataList.npy")
Y = np.load("G:\\trianLabel.npy")
print(np.shape(X))
print(np.shape(Y))

import random
import numpy as np

testDataList = np.load("G:\\testDataList.npy")
testLabel = np.load("G:\\testLabel.npy")

# Shuffle the test images and labels with the same index permutation
testIndex = [i for i in range(len(testLabel))]
random.shuffle(testIndex)
testData = []
testClass = []
for i in range(len(testIndex)):
    testData.append(testDataList[testIndex[i]])
    testClass.append(testLabel[testIndex[i]])
print("Finished shuffling the test data...")

np.save("G:\\testDataList", testData)
np.save("G:\\testLabel", testClass)
print("Finished writing the test data to disk...")

X = np.load("G:\\testDataList.npy")
Y = np.load("G:\\testLabel.npy")
print(np.shape(X))
print(np.shape(Y))
print(np.shape(testData))
print(np.shape(testLabel))

import tensorflow as tf
from random import shuffle

INPUT_NODE = 64*64
OUT_NODE = 5
IMAGE_SIZE = 64
NUM_CHANNELS = 3
NUM_LABELS = 5

# Filter size and depth of the first convolutional layer
CONV1_DEEP = 16
CONV1_SIZE = 5
# Filter size and depth of the second convolutional layer
CONV2_DEEP = 32
CONV2_SIZE = 5
# Number of nodes in the fully connected layer
FC_SIZE = 512

def inference(input_tensor, train, regularizer):
    # Convolution
    with tf.variable_scope('layer1-conv1'):
        conv1_weights = tf.Variable(tf.random_normal([CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_DEEP], stddev=0.1), name='weight')
        tf.summary.histogram('convLayer1/weights1', conv1_weights)
        conv1_biases = tf.Variable(tf.random_normal([CONV1_DEEP]), name="bias")
        tf.summary.histogram('convLayer1/bias1', conv1_biases)
        conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
        tf.summary.histogram('convLayer1/conv1', conv1)
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
        tf.summary.histogram('ConvLayer1/relu1', relu1)
    # Pooling
    with tf.variable_scope('layer2-pool1'):
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        tf.summary.histogram('ConvLayer1/pool1', pool1)
    # Convolution
    with tf.variable_scope('layer3-conv2'):
        conv2_weights = tf.Variable(tf.random_normal([CONV2_SIZE, CONV2_SIZE, CONV1_DEEP, CONV2_DEEP], stddev=0.1), name='weight')
        tf.summary.histogram('convLayer2/weights2', conv2_weights)
        conv2_biases = tf.Variable(tf.random_normal([CONV2_DEEP]), name="bias")
        tf.summary.histogram('convLayer2/bias2', conv2_biases)
        # Forward pass of the convolution
        conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
        tf.summary.histogram('convLayer2/conv2', conv2)
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
        tf.summary.histogram('ConvLayer2/relu2', relu2)
    # Pooling
    with tf.variable_scope('layer4-pool2'):
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        tf.summary.histogram('ConvLayer2/pool2', pool2)
    # Reshape
    pool_shape = pool2.get_shape().as_list()
    # Number of values per example after the last pooling layer (height * width * channels)
    nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]
    # Flatten pool2 into a batch x nodes matrix
    reshaped = tf.reshape(pool2, [-1, nodes])

    # Fully connected layer
    with tf.variable_scope('layer5-fc1'):
        fc1_weights = tf.Variable(tf.random_normal([nodes, FC_SIZE], stddev=0.1), name='weight')
        if regularizer is not None:
            tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(0.03)(fc1_weights))
        fc1_biases = tf.Variable(tf.random_normal([FC_SIZE]), name="bias")
        # Layer activations
        fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases)
        if train:
            fc1 = tf.nn.dropout(fc1, 0.5)
    # Fully connected layer
    with tf.variable_scope('layer6-fc2'):
        fc2_weights = tf.Variable(tf.random_normal([FC_SIZE, 64], stddev=0.1), name="weight")
        if regularizer is not None:
            tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(0.03)(fc2_weights))
        fc2_biases = tf.Variable(tf.random_normal([64]), name="bias")
        # Layer activations
        fc2 = tf.nn.relu(tf.matmul(fc1, fc2_weights) + fc2_biases)
        if train:
            fc2 = tf.nn.dropout(fc2, 0.5)
    # Output layer
    with tf.variable_scope('layer7-fc3'):
        fc3_weights = tf.Variable(tf.random_normal([64, NUM_LABELS], stddev=0.1), name="weight")
        if regularizer is not None:
            tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(0.03)(fc3_weights))
        fc3_biases = tf.Variable(tf.random_normal([NUM_LABELS]), name="bias")
        # Raw class scores
        logit = tf.matmul(fc2, fc3_weights) + fc3_biases
    return logit
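For orientation: with 64x64x3 inputs, the two 2x2 poolings shrink the feature map to 16x16 with 32 channels, so nodes = 16*16*32 = 8192 going into the first fully connected layer. A minimal standalone sketch, assuming the constants and inference function above are defined, that confirms the output shape:

import tensorflow as tf

tf.reset_default_graph()  # start from a clean graph for this check
images = tf.placeholder(tf.float32, [None, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS])
logits = inference(images, train=False, regularizer=None)
print(logits.get_shape().as_list())  # expected: [None, 5]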
import time
import keras
import numpy as np
import tensorflow as tf
from keras.utils import np_utils

X = np.load("G:\\trainDataList.npy")
Y = np.load("G:\\trianLabel.npy")
print(np.shape(X))
print(np.shape(Y))
# testData / testLabel still refer to the variables from the shuffling cell above
print(np.shape(testData))
print(np.shape(testLabel))

batch_size = 10
n_classes = 5
epochs = 16  # number of training epochs
learning_rate = 1e-4
batch_num = int(np.shape(X)[0] / batch_size)
dropout = 0.75

x = tf.placeholder(tf.float32, [None, 64, 64, 3])
y = tf.placeholder(tf.float32, [None, n_classes])
# keep_prob = tf.placeholder(tf.float32)
# Load the test set
test_X = np.load("G:\\testDataList.npy")
test_Y = np.load("G:\\testLabel.npy")
back = 64
ro = int(len(test_X) / back)

# Build the network; any non-None regularizer argument enables the L2 terms
# inside inference, and train=1 enables dropout. Note that the L2 terms
# collected in the 'losses' collection are not added to cost below.
pred = inference(x, 1, "regularizer")
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))

# Pick one of these three optimizers
optimizer = tf.train.AdamOptimizer(1e-4).minimize(cost)
# train_step = tf.train.GradientDescentOptimizer(0.001).minimize(cost)
# train_step = tf.train.MomentumOptimizer(0.001, 0.9).minimize(cost)

# Compare the predicted labels with the true labels
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
# Compute the accuracy
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
merged = tf.summary.merge_all()
# Variable initializer
init = tf.global_variables_initializer()
start_time = time.time()

with tf.Session() as sess:
    sess.run(init)
    # Write the graph and summaries for TensorBoard
    writer = tf.summary.FileWriter('F:/Flower_graph', sess.graph)
    for i in range(epochs):
        for j in range(batch_num):
            offset = (j * batch_size) % (Y.shape[0] - batch_size)
            # Prepare the mini-batch
            batch_data = X[offset:(offset + batch_size), :]
            batch_labels = Y[offset:(offset + batch_size), :]
            sess.run(optimizer, feed_dict={x: batch_data, y: batch_labels})
            result = sess.run(merged, feed_dict={x: batch_data, y: batch_labels})
            writer.add_summary(result, i)
        loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_data, y: batch_labels})
        print("Epoch:", '%04d' % (i+1), "cost=", "{:.9f}".format(loss), "Training accuracy", "{:.5f}".format(acc*100))
    writer.close()
    print("######################## Training finished, testing starts ###################")
    for i in range(ro):
        s = i * back
        e = s + back
        test_accuracy = sess.run(accuracy, feed_dict={x: test_X[s:e], y: test_Y[s:e]})
        print("step:%d test accuracy = %.4f%%" % (i, test_accuracy*100))
    print("Final test accuracy = %.4f%%" % (test_accuracy*100))

end_time = time.time()
print('Times:', (end_time - start_time))
print('Optimization Completed')
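Note that the "Final test accuracy" printed above is only the accuracy of the last 64-image test batch. A minimal sketch that averages across all test batches instead, assuming it is run inside the same with tf.Session() block with the sess, accuracy, x, y, test_X, test_Y, ro and back defined above:

batch_accs = []
for i in range(ro):
    s, e = i * back, i * back + back
    # Accumulate each batch's accuracy rather than keeping only the last one
    batch_accs.append(sess.run(accuracy, feed_dict={x: test_X[s:e], y: test_Y[s:e]}))
print("Mean test accuracy = %.4f%%" % (100 * np.mean(batch_accs)))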
