python DNN
exam_relu_softmax_dnn
'''
Q) Using the bmi.csv dataset, build a DNN model as follows.
 Condition 1> X/Y variables
    -> X variables : height, weight columns
    -> Y variable  : label column
 Condition 2> DNN layers
    hidden layer1 node count = 24
    hidden layer2 node count = 12
 Condition 3> train for 1,000 steps, printing the cost every 100 steps
 Condition 4> print the classification accuracy
'''
import pandas as pd
import numpy as np
import tensorflow as tf
from sklearn import metrics
from sklearn.model_selection import train_test_split

bmi = pd.read_csv('../data/bmi.csv')
print(bmi.info())

# extract column names
col = list(bmi.columns)
print(col)

# extract x/y variables
x_data = bmi[col[:2]]   # x variables : height, weight
y_data = bmi[col[-1]]   # y variable  : label

# normalize x variables - without normalization the cost diverges to nan
def data_nor(data):
    dmax = data.max()
    dmin = data.min()
    return (data - dmin) / (dmax - dmin)

x_data = data_nor(x_data)
print(x_data)

# one-hot encode the y variable
y_label = []
for y in y_data:
    if y == "thin":   y_label.append([1, 0, 0])
    if y == "normal": y_label.append([0, 1, 0])
    if y == "fat":    y_label.append([0, 0, 1])

y_data = np.array(y_label)
print(y_data.shape)  # (20000, 3)
print(y_data[:5])    # first 5 rows
print(y_data[-5:])   # last 5 rows

print(x_data.shape)  # (20000, 2)
print(y_data.shape)  # (20000, 3)

# train/test split
train_x, test_x, train_y, test_y = train_test_split(
    x_data, y_data, test_size=0.2, random_state=123)

# declare X, Y placeholders
X = tf.placeholder(tf.float32, [None, 2])  # height and weight
Y = tf.placeholder(tf.float32, [None, 3])  # one-hot labels

##############################
## DNN layers
##############################
hidden1_nodes = 24
hidden2_nodes = 12

# Hidden layer1
W1 = tf.Variable(tf.random_normal([2, hidden1_nodes]))  # layer 1: [X_in, out]
b1 = tf.Variable(tf.random_normal([hidden1_nodes]))     # [out]
hidden1 = tf.nn.relu(tf.matmul(X, W1) + b1)             # hidden1 output

# Hidden layer2
W2 = tf.Variable(tf.random_normal([hidden1_nodes, hidden2_nodes]))  # layer 2: [in, out]
b2 = tf.Variable(tf.random_normal([hidden2_nodes]))                 # [out]
hidden2 = tf.nn.relu(tf.matmul(hidden1, W2) + b2)                   # hidden2 output

# Output layer
W3 = tf.Variable(tf.random_normal([hidden2_nodes, 3]))  # layer 3: [in, Y_out]
b3 = tf.Variable(tf.random_normal([3]))                 # [out]
model = tf.matmul(hidden2, W3) + b3                     # output logits

'''
Node counts typically grow through the early layers and shrink toward the output layer.
'''

# 2. cost function : softmax + cross entropy
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
    logits=model, labels=Y))

# 3. gradient descent : learning rate = 0.01
train = tf.train.GradientDescentOptimizer(0.01).minimize(cost)

# 4. evaluation ops
predict = tf.argmax(model, 1)  # model prediction - index of the highest logit
label = tf.argmax(Y, 1)        # index of the 1 in each one-hot row

## create session
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    ## train the classifier
    for step in range(1000):  # author's notes: 500 steps -> 0.93, 1000 steps -> 0.96
        feed_data = {X: train_x, Y: train_y}
        _, cost_val = sess.run([train, cost], feed_dict=feed_data)
        if (step + 1) % 100 == 0:
            print('step=', step + 1, 'cost=', cost_val)

    # accuracy report on the held-out test set
    feed_data = {X: test_x, Y: test_y}
    predicted, y_label = sess.run([predict, label], feed_dict=feed_data)
    print("\n Predicted:\n", predicted)
    print("\n y label:\n", y_label)

    acc = metrics.accuracy_score(y_label, predicted)
    print('accuracy = ', acc)

'''
Predicted:
 [0 2 1 ... 2 2 0]
y label:
 [0 2 1 ... 2 1 0]
accuracy = 0.86125
'''
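The one-hot loop above is explicit but verbose. A more compact alternative - a sketch, assuming the label column holds exactly the strings "thin", "normal", and "fat" used in the loop - maps each label to a class index and then indexes rows of an identity matrix:

import numpy as np
import pandas as pd

bmi = pd.read_csv('../data/bmi.csv')
label_map = {"thin": 0, "normal": 1, "fat": 2}  # class indices, same order as the loop
codes = bmi['label'].map(label_map).to_numpy()  # string labels -> integer codes
y_data = np.eye(3)[codes]                       # (20000, 3) one-hot matrix

Each row of np.eye(3) is one one-hot vector, so indexing it with the code array yields the same (20000, 3) matrix the loop builds.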
step01_relu_softmax_ann
# -*- coding: utf-8 -*-
"""
ANN Model - a classifier with one hidden layer
 - input layer (4 features) : matmul(X * w)
 - hidden layer (3 nodes)   : relu()
 - output layer (3 classes) : softmax()
"""
import tensorflow as tf
import numpy as np
from sklearn.datasets import load_iris

iris = load_iris()
x_data = iris.data    # 4 features
y_data = iris.target  # 1 target column
print(y_data)         # 0, 1, 2 -> e.g. [1, 0, 0]

# normalize x variables to 0~1
def data_nor(data):
    dmax = data.max()
    dmin = data.min()
    return (data - dmin) / (dmax - dmin)

# call the function
x_data = data_nor(x_data)

# one-hot encoding
y_label = []  # empty list
for y in y_data:
    if y == 0: y_label.append([1, 0, 0])
    if y == 1: y_label.append([0, 1, 0])
    if y == 2: y_label.append([0, 0, 1])

y_data = np.array(y_label)

# define X, Y placeholders
X = tf.placeholder(tf.float32, [None, 4])  # 2D
Y = tf.placeholder(tf.float32, [None, 3])  # 2D

#########################
### ANN Layers
#########################
hidden_nodes = 3  # node = unit = neuron

# Hidden layer
w1 = tf.Variable(tf.random_normal([4, hidden_nodes]))  # [input, output]
b1 = tf.Variable(tf.random_normal([hidden_nodes]))     # one bias per node

# Output layer
w2 = tf.Variable(tf.random_normal([hidden_nodes, 3]))  # [input, output]
b2 = tf.Variable(tf.random_normal([3]))                # [final output]

# 1. hidden pre-activation : (X * w1) + b1
model = tf.matmul(X, w1) + b1

# 2. hidden layer : relu()
hidden_output = tf.nn.relu(model)

# 3. output layer logits (softmax is applied inside the cost op)
final_model = tf.matmul(hidden_output, w2) + b2

# 4. cost = softmax + cross entropy
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
    logits=final_model, labels=Y))

# 5. Adam optimizer
train = tf.train.AdamOptimizer(0.01).minimize(cost)

# 6. evaluation ops
# note: argmax must be taken over the output logits (final_model),
# not the hidden pre-activation as in the original post
predict = tf.argmax(final_model, 1)  # [0.98, 0.01, 0.01] -> 0 (index of the max)
label = tf.argmax(Y, 1)              # [1, 0, 0] -> 0

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # initialize w, b

    feed_data = {X: x_data, Y: y_data}  # training feed

    for step in range(1000):
        _, cost_val = sess.run([train, cost], feed_dict=feed_data)
        if (step + 1) % 100 == 0:
            print('step=', step + 1, 'cost =', cost_val)

    # test the trained model (here on the same data it was trained on)
    predict_re, label_re = sess.run([predict, label], feed_dict=feed_data)

    # T/F -> 1/0 -> mean
    acc = tf.reduce_mean(tf.cast(tf.equal(predict_re, label_re), tf.float32))
    print('accuracy =', sess.run(acc))
    # author's recorded run: accuracy = 0.97333336
    print('predict=', predict_re)
    print('label=', label_re)
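One caveat on data_nor(): for the NumPy iris matrix, data.max() and data.min() are scalars over the whole array, so all four features share a single scale. If each feature should be scaled to 0~1 independently, a minimal per-column variant (a sketch, not part of the original script) passes axis=0 and relies on broadcasting:

import numpy as np

def data_nor_cols(data):
    dmin = data.min(axis=0)  # per-column minimum
    dmax = data.max(axis=0)  # per-column maximum
    return (data - dmin) / (dmax - dmin)  # broadcasts over rows

For iris the global version already works (the recorded accuracy above is 0.97), but per-column scaling is the safer default when feature ranges differ widely, as height and weight do in the bmi example.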
step02_relu_softmax_dnn
# -*- coding: utf-8 -*-
"""
DNN Model
 - Input layer      : 4 features
 - Hidden layers (2): H1(12) -> H2(6)
 - Output layer     : 3 classes
"""
import tensorflow as tf
import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split

iris = load_iris()
x_data = iris.data    # 4 features
y_data = iris.target  # 1 target column
print(y_data)         # 0, 1, 2 -> e.g. [1, 0, 0]

# normalize x variables to 0~1
def data_nor(data):
    dmax = data.max()
    dmin = data.min()
    return (data - dmin) / (dmax - dmin)

# call the function
x_data = data_nor(x_data)

# one-hot encoding
y_label = []  # empty list
for y in y_data:
    if y == 0: y_label.append([1, 0, 0])
    if y == 1: y_label.append([0, 1, 0])
    if y == 2: y_label.append([0, 0, 1])

y_data = np.array(y_label)

# train/test split (8:2)
train_x, test_x, train_y, test_y = train_test_split(
    x_data, y_data, test_size=0.2, random_state=123)

# define X, Y placeholders
X = tf.placeholder(tf.float32, [None, 4])  # 2D
Y = tf.placeholder(tf.float32, [None, 3])  # 2D

######################
## DNN Layers
######################
hidden1_nodes = 12
hidden2_nodes = 6  # node counts shrink toward the output layer

# Hidden layer1 : layer 1 [input, H1]
w1 = tf.Variable(tf.random_normal([4, hidden1_nodes]))  # [input, output]
b1 = tf.Variable(tf.random_normal([hidden1_nodes]))
hidden1_output = tf.nn.relu(tf.matmul(X, w1) + b1)

# Hidden layer2 : layer 2 [H1 -> H2]
w2 = tf.Variable(tf.random_normal([hidden1_nodes, hidden2_nodes]))
b2 = tf.Variable(tf.random_normal([hidden2_nodes]))
hidden2_output = tf.nn.relu(tf.matmul(hidden1_output, w2) + b2)

# Output layer : layer 3 [H2 -> classes]
w3 = tf.Variable(tf.random_normal([hidden2_nodes, 3]))
b3 = tf.Variable(tf.random_normal([3]))
model = tf.matmul(hidden2_output, w3) + b3

# cost = softmax + cross entropy
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
    logits=model, labels=Y))

# Adam optimizer
train = tf.train.AdamOptimizer(0.01).minimize(cost)

# evaluation ops
predict = tf.argmax(model, 1)  # [0.98, 0.01, 0.01] -> 0 (index of the max)
label = tf.argmax(Y, 1)        # [1, 0, 0] -> 0

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # initialize w, b

    feed_data = {X: x_data, Y: y_data}  # training feed (full dataset)

    for step in range(1000):
        _, cost_val = sess.run([train, cost], feed_dict=feed_data)
        if (step + 1) % 100 == 0:
            print('step=', step + 1, 'cost =', cost_val)

    # model test - note: the split above is computed but unused here;
    # swap in the line below to evaluate on the held-out data instead
    # feed_data = {X: test_x, Y: test_y}
    predict_re, label_re = sess.run([predict, label], feed_dict=feed_data)

    # T/F -> 1/0 -> mean
    acc = tf.reduce_mean(tf.cast(tf.equal(predict_re, label_re), tf.float32))
    print('accuracy =', sess.run(acc))
    # author's recorded run (full-data evaluation): accuracy = 0.9866667
    print('predict=', predict_re)
    print('label=', label_re)
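All three scripts target the TensorFlow 1.x graph/session API (tf.placeholder, tf.Session, tf.train.*, tf.random_normal), which is no longer in the default namespace of TensorFlow 2.x. If only TF 2.x is installed - an assumption about your environment - the legacy API is still reachable through the compat module; a minimal shim replacing the plain import at the top of each script:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # restores placeholders, Session, and tf.train optimizers

With these two lines in place of the original import tensorflow as tf, the scripts above should run unchanged; under TF 1.x, no change is needed.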