# Neural network model for estimating the battery aging rate (SOH)
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

path = 'SOH_Data.xlsx'

# Read the training set and apply min-max normalization
xTrainData = pd.read_excel(path, sheet_name=0)
yTrainData = pd.read_excel(path, sheet_name=1)
n1 = np.shape(xTrainData)[1]
x_data = np.array(xTrainData).astype('float32')
for i in range(n1):
    # normalize each feature column to [0, 1]
    x_data[:, i] = (x_data[:, i] - np.amin(x_data[:, i])) / (np.amax(x_data[:, i]) - np.amin(x_data[:, i]))
y_data = np.array(yTrainData).astype('float32')
y_data[:] = (y_data[:] - np.amin(y_data[:])) / (np.amax(y_data[:]) - np.amin(y_data[:]))

# Read the test set and apply the same normalization
xTestData = pd.read_excel(path, sheet_name=2)
yTestData = pd.read_excel(path, sheet_name=3)
xTest = np.array(xTestData).astype('float32')
n2 = np.shape(xTestData)[1]  # number of feature columns in the test set
xTrain = np.array(xTrainData).astype('float32')
for i in range(n2):
    xTest[:, i] = (xTest[:, i] - np.amin(xTest[:, i])) / (np.amax(xTest[:, i]) - np.amin(xTest[:, i]))
yTest = np.array(yTestData).astype('float32')
yTest[:] = (yTest[:] - np.amin(yTest[:])) / (np.amax(yTest[:]) - np.amin(yTest[:]))
# Summary statistics recorded for TensorBoard
def variable_summaries(var):
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)  # mean
        tf.summary.scalar('mean', mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)  # standard deviation
        tf.summary.scalar('max', tf.reduce_max(var))  # maximum
        tf.summary.scalar('min', tf.reduce_min(var))  # minimum
        tf.summary.histogram('histogram', var)  # histogram

# Five-layer network: number of neurons in each layer
IHO = [12, 8, 5, 4, 1]
# Name scopes
with tf.name_scope('input'):
    # Placeholders for the input features and the target
    x = tf.placeholder(tf.float32, [None, 12], name='xInput')
    y = tf.placeholder(tf.float32, [None, 1], name='y')

# Hidden layers
with tf.name_scope('layer'):
    with tf.name_scope('weights_L1'):
        Weight_L1 = tf.Variable(tf.random_normal([12, 8]), name='W1')
        variable_summaries(Weight_L1)
    with tf.name_scope('bias_L1'):
        biases_L1 = tf.Variable(tf.zeros([8]), name='b1')
        variable_summaries(biases_L1)
    with tf.name_scope('L_1'):
        Wx_plus_b_L1 = tf.matmul(x, Weight_L1) + biases_L1
        L1 = tf.nn.tanh(Wx_plus_b_L1)
    with tf.name_scope('weights_L2'):
        Weight_L2 = tf.Variable(tf.random_normal([8, 5]), name='W2')
        variable_summaries(Weight_L2)
    with tf.name_scope('bias_L2'):
        biases_L2 = tf.Variable(tf.zeros([5]), name='b2')
        variable_summaries(biases_L2)
    with tf.name_scope('L_2'):
        Wx_plus_b_L2 = tf.matmul(L1, Weight_L2) + biases_L2
        L2 = tf.nn.tanh(Wx_plus_b_L2)
    with tf.name_scope('weights_L3'):
        Weight_L3 = tf.Variable(tf.random_normal([5, 4]), name='W3')
        variable_summaries(Weight_L3)
    with tf.name_scope('bias_L3'):
        biases_L3 = tf.Variable(tf.zeros([4]), name='b3')
        variable_summaries(biases_L3)
    with tf.name_scope('L_3'):
        Wx_plus_b_L3 = tf.matmul(L2, Weight_L3) + biases_L3
        L3 = tf.nn.tanh(Wx_plus_b_L3)

# Output layer
with tf.name_scope('weights_L4'):
    Weight_L4 = tf.Variable(tf.random_normal([4, 1]), name='W4')
    variable_summaries(Weight_L4)
with tf.name_scope('bias_L4'):
    biases_L4 = tf.Variable(tf.zeros([1]), name='b4')
    variable_summaries(biases_L4)
with tf.name_scope('prediction'):
    Wx_plus_b_L4 = tf.matmul(L3, Weight_L4) + biases_L4
    prediction = tf.nn.tanh(Wx_plus_b_L4)

# Quadratic (mean squared error) cost
with tf.name_scope('loss'):
    loss = tf.reduce_mean(tf.square(y - prediction), name='loss')
    tf.summary.scalar('loss', loss)

# Train with gradient descent
with tf.name_scope('train'):
    train_step = tf.train.GradientDescentOptimizer(0.01).minimize(loss)

# Merge all summaries
merged = tf.summary.merge_all()
with tf.Session() as sess:
    # Initialize all variables
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter('logs/', sess.graph)
    for i in range(10000):
        summary, _ = sess.run([merged, train_step], feed_dict={x: x_data, y: y_data})
        writer.add_summary(summary, i)
        curr_loss = sess.run(loss, feed_dict={x: x_data, y: y_data})
        if (i + 1) % 100 == 0:
            print('Loss after iteration %d:' % (i + 1), curr_loss)
    # Predictions on the training set
    prediction_value = sess.run(prediction, feed_dict={x: x_data})
    # Predictions on the test set
    prediction_value_test = sess.run(prediction, feed_dict={x: xTest})
    test_loss = sess.run(loss, feed_dict={x: xTest, y: yTest})
    print('Test loss:', test_loss)
    print(prediction_value_test)
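# The summaries written to logs/ can be inspected by running: tensorboard --logdir logs/
# matplotlib is imported above but never used in this script, so the lines below
# are a minimal plotting sketch (an addition, not part of the original script)
# that compares the normalized test targets with the network output.
plt.plot(yTest, label='measured SOH (normalized)')
plt.plot(prediction_value_test, label='predicted SOH (normalized)')
plt.xlabel('test sample index')
plt.ylabel('normalized SOH')
plt.legend()
plt.show()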
# Neural network model for estimating the battery aging rate, second variant:
# the data are read from text files instead of the Excel workbook above.
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from numpy import *  # provides mat()
import os

np.set_printoptions(suppress=True)
np.set_printoptions(threshold=np.inf)  # print full arrays instead of truncating
BATCH_SIZE = 256

# Read and normalize the training and test sets
with open("D:\\bs\\finall_data\\marry2.11\\train\\nc_all.txt", "rb") as fa, \
     open("D:\\bs\\finall_data\\marry2.11\\train\\calipso_all.txt", "rb") as fb, \
     open("D:\\bs\\finall_data\\marry2.11\\test\\nc_text.txt", "rb") as fc, \
     open("D:\\bs\\finall_data\\marry2.11\\test\\calipso_text.txt", "rb") as fd:
# xTrainData = pd.read_excel(path, sheetname = 0)
# yTrainData = pd.read_excel(path, sheetname = 1)
# # print(xTrainData)
# # print(yTrainData)
# n1 = np.shape(xTrainData)[1]
# # print(n1)
# x_data = np.array(xTrainData).astype('float32')
# print(x_data)
# def Polyfit(x, y, degree):
# results = {}
# coeffs = np.polyfit(x, y, degree)
# # results['polynomial'] = coeffs.tolist()
#
# # r-squared
# p = np.poly1d(coeffs)
# # print(p)
# # # fit values, and mean
# # yhat = p(x) # or [p(z) for z in x]
# # ybar = np.sum(y) / len(y) # or sum(y)/len(y)
# # ssreg = np.sum((yhat - ybar) ** 2) # or sum([ (yihat - ybar)**2 for yihat in yhat])
# # sstot = np.sum((y - ybar) ** 2) # or sum([ (yi - ybar)**2 for yi in y])
# # results['determination'] = ssreg / sstot  # R-squared (accuracy)
# return results
    # Training inputs: 16 whitespace-separated feature values per line
    list_x = []
    for i in fa.readlines():
        # print(str(i))
        x_data_1 = str(i).split(" ")[2:18]
        for x_data_12 in x_data_1:
            x_data_12 = float(x_data_12)
            list_x.append(x_data_12)
    mat_x = mat(list_x)
    x_data = mat_x.reshape(-1, 16)
    for i in range(16):
        # min-max normalization of each feature column (the original indexed
        # rows here, x_data[i, :], which only touched the first 16 samples)
        x_data[:, i] = (x_data[:, i] - np.amin(x_data[:, i])) / (np.amax(x_data[:, i]) - np.amin(x_data[:, i]))

    # Training targets, mapped into (0, 1] with y = 1 / (1 + value)
    list_y = []
    for v in fb.readlines():
        y_data_1 = str(v).split(" ")[2].split(" ")[0]
        y_data_1 = 1 / (1 + float(y_data_1))
        # print(y_data)
        list_y.append(float(y_data_1))
    # print(list_y)
    mat_y = mat(list_y)
    y_data = mat_y.reshape(-1, 1)
    # print(y_data)
    # y_data = np.array(yTrainData).astype('float32')
# y_data[:] = (y_data[:] - np.amin(y_data[:]))/(np.amax(y_data[:]) - np.amin(y_data[:]))
# #
# # print(y_data[:])
# z1 = Polyfit(x_data, y_data, 2)
# plt.plot(x_data, y_data, 'o')
# # plt.plot(x_data, np.polyval(z1, x_data))
# plt.show()
    # Read and normalize the test set
    list_t_x = []
    for m in fc.readlines():
        x_data_t_1 = str(m).split(" ")[2:18]
        for x_data_t_12 in x_data_t_1:
            # print(x_data_t_12)
            x_data_t_12 = float(x_data_t_12)
            list_t_x.append(x_data_t_12)
    mat_t_x = mat(list_t_x)
    xTest = mat_t_x.reshape(-1, 16)
    # print(xTest.shape)  # (1598, 16)
# xTestData = pd.read_excel(path, sheetname = 2)
# yTestData = pd.read_excel(path, sheetname = 3)
# xTest = np.array(xTestData).astype('float32')
# n2 = np.shape(xTrainData)[1]
# xTrain = np.array(xTrainData).astype('float32')
    for i in range(16):
        # same per-column min-max normalization as the training set
        # (the original indexed rows here, xTest[i, :])
        xTest[:, i] = (xTest[:, i] - np.amin(xTest[:, i])) / (np.amax(xTest[:, i]) - np.amin(xTest[:, i]))

    # Test targets, with the same 1 / (1 + value) mapping
    list_t_y = []
    for n in fd.readlines():
        y_data_t_1 = str(n).split(" ")[2].split(" ")[0]
        # print(y_data)
        y_data_t_1 = 1 / (1 + float(y_data_t_1))
        list_t_y.append(float(y_data_t_1))
    # print(list_y)
    mat_t_y = mat(list_t_y)
    yTest = mat_t_y.reshape(-1, 1)
    # print(yTest)
    # yTest = np.array(yTestData).astype('float32')
    # yTest[:] = (yTest[:] - np.amin(yTest[:]))/(np.amax(yTest[:]) - np.amin(yTest[:]))
    # print(np.amax(yTest[:]))
    # print(np.amax(y_Test[:]))
# Summary statistics recorded for TensorBoard
def variable_summaries(var):
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)  # mean
        tf.summary.scalar('mean', mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)  # standard deviation
        tf.summary.scalar('max', tf.reduce_max(var))  # maximum
        tf.summary.scalar('min', tf.reduce_min(var))  # minimum
        tf.summary.histogram('histogram', var)  # histogram

# Five-layer network: number of neurons in each layer
IHO = [16, 8, 5, 4, 1]
# Name scopes
with tf.name_scope('input'):
    # Placeholders for the input features and the target
    x = tf.placeholder(tf.float32, [None, 16], name='xInput')
    y = tf.placeholder(tf.float32, [None, 1], name='y')

# Hidden layers
with tf.name_scope('layer'):
    with tf.name_scope('weights_L1'):
        Weight_L1 = tf.Variable(tf.random_normal([16, 8]), name='W1')
        variable_summaries(Weight_L1)
    with tf.name_scope('bias_L1'):
        biases_L1 = tf.Variable(tf.zeros([8]), name='b1')
        variable_summaries(biases_L1)
    with tf.name_scope('L_1'):
        Wx_plus_b_L1 = tf.matmul(x, Weight_L1) + biases_L1
        L1 = tf.nn.sigmoid(Wx_plus_b_L1)
    with tf.name_scope('weights_L2'):
        Weight_L2 = tf.Variable(tf.random_normal([8, 5]), name='W2')
        variable_summaries(Weight_L2)
    with tf.name_scope('bias_L2'):
        biases_L2 = tf.Variable(tf.zeros([5]), name='b2')
        variable_summaries(biases_L2)
    with tf.name_scope('L_2'):
        Wx_plus_b_L2 = tf.matmul(L1, Weight_L2) + biases_L2
        L2 = tf.nn.sigmoid(Wx_plus_b_L2)
    with tf.name_scope('weights_L3'):
        Weight_L3 = tf.Variable(tf.random_normal([5, 4]), name='W3')
        variable_summaries(Weight_L3)
    with tf.name_scope('bias_L3'):
        biases_L3 = tf.Variable(tf.zeros([4]), name='b3')
        variable_summaries(biases_L3)
    with tf.name_scope('L_3'):
        Wx_plus_b_L3 = tf.matmul(L2, Weight_L3) + biases_L3
        L3 = tf.nn.sigmoid(Wx_plus_b_L3)

# Output layer
with tf.name_scope('weights_L4'):
    Weight_L4 = tf.Variable(tf.random_normal([4, 1]), name='W4')
    variable_summaries(Weight_L4)
with tf.name_scope('bias_L4'):
    biases_L4 = tf.Variable(tf.zeros([1]), name='b4')
    variable_summaries(biases_L4)
with tf.name_scope('prediction'):
    Wx_plus_b_L4 = tf.matmul(L3, Weight_L4) + biases_L4
    prediction = tf.nn.sigmoid(Wx_plus_b_L4)

# Quadratic (mean squared error) cost
with tf.name_scope('loss'):
    loss = tf.reduce_mean(tf.square(y - prediction), name='loss')
    tf.summary.scalar('loss', loss)

# Train with gradient descent
with tf.name_scope('train'):
    train_step = tf.train.GradientDescentOptimizer(0.01).minimize(loss)

# Merge all summaries
merged = tf.summary.merge_all()
with tf.Session() as sess:
    # Initialize all variables
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter('logs/', sess.graph)
    for i in range(1000):
        summary, _ = sess.run([merged, train_step], feed_dict={x: x_data, y: y_data})
        writer.add_summary(summary, i)
        curr_loss = sess.run(loss, feed_dict={x: x_data, y: y_data})
        if (i + 1) % 100 == 0:
            print('Loss after iteration %d:' % (i + 1), curr_loss)
    # Predictions on the training set
    prediction_value = sess.run(prediction, feed_dict={x: x_data})
    # Predictions on the test set
    prediction_value_test = sess.run(prediction, feed_dict={x: xTest})
    test_loss = sess.run(loss, feed_dict={x: xTest, y: yTest})
    print('Test loss:', test_loss)
    # print(len(prediction_value_test))  # number of test samples
    # print(yTest)
    print(prediction_value_test)
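# The targets above were transformed with y = 1 / (1 + value), so the network
# output lives in that transformed space. A small sketch (an addition, not part
# of the original script) to map predictions back to the original target scale:
original_scale_pred = 1.0 / np.asarray(prediction_value_test) - 1.0
print(original_scale_pred)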
# Comparison of three MLPs on MNIST with Keras (with and without Dropout)
from __future__ import print_function

import numpy as np
np.random.seed(1337)  # for reproducibility

from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import RMSprop
from keras.utils import np_utils
from keras.optimizers import SGD

batch_size = 128
nb_classes = 10
nb_epoch = 20

# Load MNIST and flatten the 28x28 images into 784-dimensional vectors
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

# One-hot encode the labels
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)

# Model 1: two hidden layers with Dropout
model1 = Sequential()
model1.add(Dense(256, activation='relu', input_dim=784))
model1.add(Dropout(0.2))
model1.add(Dense(256, activation='relu'))
model1.add(Dropout(0.2))
model1.add(Dense(10, activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model1.compile(loss='categorical_crossentropy',
               optimizer=sgd,
               metrics=['accuracy'])
history1 = model1.fit(X_train, Y_train,
                      batch_size=batch_size,
                      epochs=nb_epoch,
                      verbose=2,
                      validation_data=(X_test, Y_test))

# Model 2: two hidden layers without Dropout
model2 = Sequential()
model2.add(Dense(256, activation='relu', input_dim=784))
model2.add(Dense(256, activation='relu'))
model2.add(Dense(10, activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model2.compile(loss='categorical_crossentropy',
               optimizer=sgd,
               metrics=['accuracy'])
history2 = model2.fit(X_train, Y_train,
                      batch_size=batch_size,
                      epochs=nb_epoch,
                      verbose=2,
                      validation_data=(X_test, Y_test))

# Model 3: a single hidden layer
model3 = Sequential()
model3.add(Dense(256, activation='relu', input_dim=784))
model3.add(Dense(10, activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model3.compile(loss='categorical_crossentropy',
               optimizer=sgd,
               metrics=['accuracy'])
history3 = model3.fit(X_train, Y_train,
                      batch_size=batch_size,
                      epochs=nb_epoch,
                      verbose=2,
                      validation_data=(X_test, Y_test))

import matplotlib.pyplot as plt
# list all data in history
print(history1.history.keys())
# summarize history for accuracy
plt.plot(history1.history['acc'])
plt.plot(history1.history['val_acc'])
plt.title('model1 accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
plt.plot(history2.history['acc'])
plt.plot(history2.history['val_acc'])
plt.title('model2 accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
plt.plot(history3.history['acc'])
plt.plot(history3.history['val_acc'])
plt.title('model3 accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
plt.plot(history1.history['val_acc'])
plt.plot(history2.history['val_acc'])
plt.plot(history3.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['model1', 'model2', 'model3'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history1.history['loss'])
plt.plot(history1.history['val_loss'])
plt.title('model1 loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
plt.plot(history2.history['loss'])
plt.plot(history2.history['val_loss'])
plt.title('model2 loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
plt.plot(history3.history['loss'])
plt.plot(history3.history['val_loss'])
plt.title('model3 loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
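# A short evaluation sketch (an addition, not part of the original post): report
# the final test loss and accuracy of each model with Keras' evaluate().
for name, model in [('model1', model1), ('model2', model2), ('model3', model3)]:
    score = model.evaluate(X_test, Y_test, verbose=0)
    print(name, 'test loss:', score[0], 'test accuracy:', score[1])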
