JAFFE Facial Expression Recognition with TensorFlow and Keras
cut_save_face.py
#!/usr/bin/python
# coding:utf8
import cv2
import os
import numpy as np
import csv
def detect(img, cascade):
"""
使用Haar特征检测分类器完成人脸检测
:param img:
:param cascade:
:return:
"""
# detectMultiScale检测出图片中所有的人脸,并将人脸用vector保存各个人脸的坐标、大小(用矩形表示),返回坐标。
rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30),
flags=cv2.CASCADE_SCALE_IMAGE)
if len(rects) == 0:
return []
    # convert (x, y, w, h) into (x1, y1, x2, y2) corner coordinates
    rects[:, 2:] += rects[:, :2]
return rects
cascade = cv2.CascadeClassifier(
"E:/Anaconda3/envs/sklearn/Library/etc/haarcascades/haarcascade_frontalface_alt.xml")
f = "jaffe/"
fs = os.listdir(f)
data = np.zeros([213, 48 * 48], dtype=np.uint8)
label = np.zeros([213], dtype=int)
i = 0
for f1 in fs:
tmp_path = os.path.join(f, f1)
if not os.path.isdir(tmp_path):
        img = cv2.imread(tmp_path, 1)
        # convert BGR to grayscale
        dst = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
rects = detect(dst, cascade)
for x1, y1, x2, y2 in rects:
cv2.rectangle(img, (x1 + 10, y1 + 20), (x2 - 10, y2), (0, 255, 255), 2)
            # crop the face region (the insets trim hair and background) and resize to 48x48
            roi = dst[y1 + 20:y2, x1 + 10:x2 - 10]
            re_roi = cv2.resize(roi, (48, 48))
            # the emotion code sits in the filename, e.g. KA.AN1.39.tiff -> 'AN'
            img_label = tmp_path[len(f) + 3:len(f) + 5]
if img_label == 'AN':
label[i] = 0
elif img_label == 'DI':
label[i] = 1
elif img_label == 'FE':
label[i] = 2
elif img_label == 'HA':
label[i] = 3
elif img_label == 'SA':
label[i] = 4
elif img_label == 'SU':
label[i] = 5
elif img_label == 'NE':
label[i] = 6
else:
print("get label error.......\n")
            # flatten collapses the array into one dimension; it only works on
            # NumPy arrays (ndarray or matrix), not on plain Python lists
            data[i][0:48 * 48] = np.ndarray.flatten(re_roi)
            i = i + 1
with open(r"face.csv", "w") as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['emotion', 'pixels'])
for i in range(len(label)):
data_list = list(data[i])
b = " ".join(str(x) for x in data_list)
        # stack the label and the pixel string into a single row
        l = np.hstack([label[i], b])
writer.writerow(l)
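A quick sanity check on the CSV this produces; a small sketch, assuming the script above has run to completion:
import pandas as pd
df = pd.read_csv("face.csv")
print(df.shape)                       # expect (213, 2)
print(df['emotion'].value_counts())   # samples per emotion class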
detectMultiScale(img, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE)
- image: the image to scan; a grayscale image is usually passed to speed up detection.
- scaleFactor: how much the search window grows between two successive scans; the default of 1.1 enlarges the window by 10% per pass.
- minNeighbors: the minimum number of neighboring rectangles required to keep a detection (default 3). Candidate groups made up of fewer than minNeighbors - 1 rectangles are rejected; with minNeighbors = 0 the function returns every raw candidate rectangle without grouping, which is useful when you combine the detections yourself.
- flags: CASCADE_DO_CANNY_PRUNING = 1 uses Canny edge detection to skip regions with too few or too many edges; CASCADE_SCALE_IMAGE = 2 is the normal scaled detection; CASCADE_FIND_BIGGEST_OBJECT = 4 detects only the largest object; CASCADE_DO_ROUGH_SEARCH = 8 performs a rough search.
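To get a feel for how scaleFactor and minNeighbors trade speed against recall, here is a minimal sketch; the cascade path is the one used above, and the sample filename is an assumed JAFFE image (any grayscale photo works):
import cv2

cascade = cv2.CascadeClassifier(
    "E:/Anaconda3/envs/sklearn/Library/etc/haarcascades/haarcascade_frontalface_alt.xml")
# KA.AN1.39.tiff is an assumed sample file from the jaffe/ directory
img = cv2.imread("jaffe/KA.AN1.39.tiff", cv2.IMREAD_GRAYSCALE)
# a larger scaleFactor scans fewer window sizes (faster, may miss faces);
# a larger minNeighbors requires more overlapping hits (fewer false positives)
for sf, mn in [(1.05, 3), (1.3, 4), (1.5, 6)]:
    rects = cascade.detectMultiScale(img, scaleFactor=sf, minNeighbors=mn,
                                     minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE)
    print(sf, mn, "->", len(rects), "face(s)")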
show_facial_expression.py
#!/usr/bin/python
# coding:utf8
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
emotion = {0: 'Angry', 1: 'Disgust', 2: 'Fear', 3: 'Happy', 4: 'Sad', 5: 'Surprise', 6: 'Neutral'}
data = pd.read_csv(r'face.csv', dtype='a')
label = np.array(data['emotion'])
img_data = np.array(data['pixels'])
N_sample = label.size
# show the first 25 faces with their corresponding emotions
for i in range(25):
x = img_data[i]
    # parse the space-separated pixel string into a float array
    x = np.fromstring(x, dtype=float, sep=' ')
x = x / x.max()
img_x = np.reshape(x, (48, 48))
plt.subplot(5, 5, i + 1)
plt.axis('off')
plt.title(emotion[int(label[i])])
plt.imshow(img_x, plt.cm.gray)
plt.show()
count_facial_expression.py
#!/usr/bin/python
# coding:utf8
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
emotion = {0: 'Angry', 1: 'Disgust', 2: 'Fear', 3: 'Happy', 4: 'Sad', 5: 'Surprise', 6: 'Neutral'}
data = pd.read_csv(r'face.csv', dtype='a')
label = np.array(data['emotion'])
img_data = np.array(data['pixels'])
N_sample = label.size
# count the number of samples in each of the 7 emotion classes
emotions = np.zeros(7)
for i in label:
    emotions[int(i)] += 1
print(emotions)
plt.bar(range(7), emotions, 0.5, color=['red', 'green', 'blue'])
plt.xlabel('emotions')
plt.xticks(range(7), ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral'], rotation=0)
plt.ylabel('number')
plt.grid()
plt.show()
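The counting loop above can also be collapsed into a single NumPy call; a small equivalent sketch, assuming label holds the values read from face.csv:
import numpy as np

# bincount tallies occurrences of each integer 0..6 in one pass
emotions = np.bincount(label.astype(int), minlength=7)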
train_keras.py
#!/usr/bin/python
# coding:utf8
import numpy as np
import pandas as pd
from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator
# emotion classes
emotion = {0: 'Angry', 1: 'Disgust', 2: 'Fear', 3: 'Happy', 4: 'Sad', 5: 'Surprise', 6: 'Neutral'}
# read the data
data = pd.read_csv(r'face.csv', dtype='a')
# label list
label = np.array(data['emotion'])
# image list
img_data = np.array(data['pixels'])
# number of images
N_sample = label.size
# (213, 2304)
Face_data = np.zeros((N_sample, 48 * 48))
# (213, 7) one-hot labels; np.float is deprecated, plain float behaves the same
Face_label = np.zeros((N_sample, 7), dtype=float)
for i in range(N_sample):
x = img_data[i]
x = np.fromstring(x, dtype=float, sep=' ')
x = x / x.max()
Face_data[i] = x
Face_label[i, int(label[i])] = 1.0
# number of training samples
train_num = 200
# number of test samples
test_num = 13
# training data
train_x = Face_data[0:train_num, :]
train_y = Face_label[0:train_num, :]
train_x = train_x.reshape(-1, 48, 48, 1)
# test data
test_x = Face_data[train_num: train_num + test_num, :]
test_y = Face_label[train_num: train_num + test_num, :]
test_x = test_x.reshape(-1, 48, 48, 1)
# sequential model
model = Sequential()
model.add(Conv2D(32, (5, 5), activation='relu', input_shape=(48, 48, 1)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(7, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
# data augmentation
datagen = ImageDataGenerator(
    featurewise_center=True,
    featurewise_std_normalization=True,
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    horizontal_flip=True)
datagen.fit(train_x)
# one epoch should sweep the training set once: steps = samples / batch_size
model.fit_generator(datagen.flow(train_x, train_y, batch_size=10),
                    steps_per_epoch=len(train_x) // 10, epochs=20)
model.fit(train_x, train_y, batch_size=10, epochs=100)
score = model.evaluate(test_x, test_y, batch_size=10)
print("score:", score)
model.summary()
Output
Epoch 50/50
10/200 [>.............................] - ETA: 1s - loss: 0.0015 - acc: 1.0000
20/200 [==>...........................] - ETA: 1s - loss: 7.8377e-04 - acc: 1.0000
30/200 [===>..........................] - ETA: 1s - loss: 0.0013 - acc: 1.0000
40/200 [=====>........................] - ETA: 1s - loss: 0.0012 - acc: 1.0000
50/200 [======>.......................] - ETA: 1s - loss: 0.0013 - acc: 1.0000
60/200 [========>.....................] - ETA: 1s - loss: 0.0011 - acc: 1.0000
70/200 [=========>....................] - ETA: 1s - loss: 0.0013 - acc: 1.0000
80/200 [===========>..................] - ETA: 1s - loss: 0.0012 - acc: 1.0000
90/200 [============>.................] - ETA: 0s - loss: 0.0011 - acc: 1.0000
100/200 [==============>...............] - ETA: 0s - loss: 0.0011 - acc: 1.0000
110/200 [===============>..............] - ETA: 0s - loss: 9.7286e-04 - acc: 1.0000
120/200 [=================>............] - ETA: 0s - loss: 8.9958e-04 - acc: 1.0000
130/200 [==================>...........] - ETA: 0s - loss: 8.3767e-04 - acc: 1.0000
140/200 [====================>.........] - ETA: 0s - loss: 9.1249e-04 - acc: 1.0000
150/200 [=====================>........] - ETA: 0s - loss: 9.3190e-04 - acc: 1.0000
160/200 [=======================>......] - ETA: 0s - loss: 9.0101e-04 - acc: 1.0000
170/200 [========================>.....] - ETA: 0s - loss: 8.6291e-04 - acc: 1.0000
180/200 [==========================>...] - ETA: 0s - loss: 8.2539e-04 - acc: 1.0000
190/200 [===========================>..] - ETA: 0s - loss: 7.8394e-04 - acc: 1.0000
200/200 [==============================] - 2s 9ms/step - loss: 7.8767e-04 - acc: 1.0000
10/13 [======================>.......] - ETA: 0s
13/13 [==============================] - 0s 5ms/step
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_1 (Conv2D) (None, 44, 44, 32) 832
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 22, 22, 32) 0
_________________________________________________________________
conv2d_2 (Conv2D) (None, 18, 18, 64) 51264
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 9, 9, 64) 0
_________________________________________________________________
flatten_1 (Flatten) (None, 5184) 0
_________________________________________________________________
dense_1 (Dense) (None, 1024) 5309440
_________________________________________________________________
dropout_1 (Dropout) (None, 1024) 0
_________________________________________________________________
dense_2 (Dense) (None, 7) 7175
=================================================================
Total params: 5,368,711
Trainable params: 5,368,711
Non-trainable params: 0
_________________________________________________________________
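With training done, a quick sanity check is to run the network on one of the 13 held-out faces; a minimal sketch that continues with the variables from train_keras.py above (nothing new is assumed beyond them):
import numpy as np

# probabilities over the 7 classes for the first test face, shape (1, 7)
probs = model.predict(test_x[0:1])
print("predicted:   ", emotion[int(np.argmax(probs))])
print("ground truth:", emotion[int(np.argmax(test_y[0]))])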
train_tensorflow.py
#!/usr/bin/python
# coding:utf8
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
def conv_pool_layer(data, weights_size, biases_size):
"""
卷积神经网络的层
:param data:
:param weights_size:
:param biases_size:
:return:
"""
weights = tf.Variable(tf.truncated_normal(weights_size, stddev=0.1))
biases = tf.Variable(tf.constant(0.1, shape=biases_size))
conv2d = tf.nn.conv2d(data, weights, strides=[1, 1, 1, 1], padding='SAME')
relu = tf.nn.relu(conv2d + biases)
return tf.nn.max_pool(relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def linear_layer(data, weights_size, biases_size):
    """A fully connected layer: data * weights + biases."""
weights = tf.Variable(tf.truncated_normal(weights_size, stddev=0.1))
biases = tf.Variable(tf.constant(0.1, shape=biases_size))
return tf.add(tf.matmul(data, weights), biases)
def convolutional_neural_network(x, keep_prob):
"""
卷积神经网络
:param x:
:param keep_prob:
:return:
"""
x_image = tf.reshape(x, [-1, 48, 48, 1])
h_pool1 = conv_pool_layer(x_image, [5, 5, 1, 32], [32])
h_pool2 = conv_pool_layer(h_pool1, [5, 5, 32, 64], [64])
h_pool2_flat = tf.reshape(h_pool2, [-1, 12 * 12 * 64])
h_fc1 = tf.nn.relu(linear_layer(h_pool2_flat, [12 * 12 * 64, 1024], [1024]))
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
return tf.nn.softmax(linear_layer(h_fc1_drop, [1024, class_sum], [class_sum]))
def batch_data(x, y, batch, num):
    """Slice the batch-th mini-batch out of x and y."""
ind = np.arange(num)
index = ind[batch * batch_size:(batch + 1) * batch_size]
batch_x = x[index, :]
batch_y = y[index, :]
return batch_x, batch_y
if __name__ == "__main__":
    # emotion classes
    emotion = {0: 'Angry', 1: 'Disgust', 2: 'Fear', 3: 'Happy', 4: 'Sad', 5: 'Surprise', 6: 'Neutral'}
    # data
    data = pd.read_csv(r'face.csv', dtype='a')
    # emotion labels
    label = np.array(data['emotion'])
    # pixel data
    img_data = np.array(data['pixels'])
    # number of images
    N_sample = label.size
    # image matrix
    Face_data = np.zeros((N_sample, 48 * 48))
    # one-hot label matrix
    Face_label = np.zeros((N_sample, 7), dtype=int)
    # build the image and label matrices
for i in range(N_sample):
x = img_data[i]
x = np.fromstring(x, dtype=float, sep=' ')
x = x / x.max()
Face_data[i] = x
Face_label[i, int(label[i])] = 1
    # hyperparameters
    dropout = 0.5
    class_sum = 7
    # dropout mitigates overfitting
    keep_prob = tf.placeholder(tf.float32)
    x = tf.placeholder(tf.float32, [None, 48 * 48])
    y = tf.placeholder(tf.float32, [None, class_sum])
    # build the convolutional neural network
    pred = convolutional_neural_network(x, keep_prob)
    # first 200 samples for training, remaining 13 for testing
train_num = 200
test_num = 13
train_x = Face_data[0:train_num, :]
train_y = Face_label[0:train_num, :]
test_x = Face_data[train_num: train_num + test_num, :]
test_y = Face_label[train_num: train_num + test_num, :]
batch_size = 20
train_batch_num = int(train_num / batch_size)
test_batch_num = test_num / batch_size
    # train and evaluate the model
    # note: -sum(y * log(pred)) is numerically fragile when pred approaches 0;
    # tf.nn.softmax_cross_entropy_with_logits is the stable alternative (see below)
    cross_entropy = -tf.reduce_sum(y * tf.log(pred))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, "float"))
total_train_loss = []
total_train_acc = []
total_test_loss = []
total_test_acc = []
train_epoch = 50
with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())  # initialize_all_variables is deprecated
for epoch in range(0, train_epoch):
Total_train_loss = 0
Total_train_acc = 0
for train_batch in range(0, train_batch_num):
batch_x, batch_y = batch_data(train_x, train_y, train_batch, train_num)
                # optimization step
sess.run(train_step, feed_dict={x: batch_x, y: batch_y, keep_prob: dropout})
                if train_batch % batch_size == 0:
                    # compute loss and accuracy (with batch_size=20 and only 10
                    # batches per epoch, this logs batch 0 of each epoch)
loss, acc = sess.run([cross_entropy, accuracy], feed_dict={x: batch_x, y: batch_y, keep_prob: 1.})
print("Epoch: " + str(epoch + 1) + ", Batch: " + str(train_batch) + ", Loss= " + "{:.3f}".format(
loss) + ", Training Accuracy= " + "{:.3f}".format(acc))
Total_train_loss = Total_train_loss + loss
Total_train_acc = Total_train_acc + acc
total_train_loss.append(Total_train_loss)
total_train_acc.append(Total_train_acc)
plt.subplot(2, 1, 1)
plt.ylabel('Train loss')
plt.plot(total_train_loss, 'r')
plt.subplot(2, 1, 2)
plt.ylabel('Train accuracy')
plt.plot(total_train_acc, 'r')
plt.savefig("loss_acc.png")
plt.show()
Output
Epoch: 1, Batch: 0, Loss= 119.417, Training Accuracy= 0.150
Epoch: 2, Batch: 0, Loss= 49.221, Training Accuracy= 0.250
Epoch: 3, Batch: 0, Loss= 39.813, Training Accuracy= 0.200
Epoch: 4, Batch: 0, Loss= 43.067, Training Accuracy= 0.300
Epoch: 5, Batch: 0, Loss= 28.889, Training Accuracy= 0.500
Epoch: 6, Batch: 0, Loss= 18.173, Training Accuracy= 0.700
Epoch: 7, Batch: 0, Loss= 18.474, Training Accuracy= 0.600
Epoch: 8, Batch: 0, Loss= 15.813, Training Accuracy= 0.800
Epoch: 9, Batch: 0, Loss= 12.878, Training Accuracy= 0.850
Epoch: 10, Batch: 0, Loss= 13.250, Training Accuracy= 0.800
Epoch: 11, Batch: 0, Loss= 8.750, Training Accuracy= 0.950
Epoch: 12, Batch: 0, Loss= 10.424, Training Accuracy= 0.850
Epoch: 13, Batch: 0, Loss= 7.592, Training Accuracy= 1.000
Epoch: 14, Batch: 0, Loss= 7.146, Training Accuracy= 0.950
Epoch: 15, Batch: 0, Loss= 7.377, Training Accuracy= 1.000
Epoch: 16, Batch: 0, Loss= 5.423, Training Accuracy= 1.000
Epoch: 17, Batch: 0, Loss= 6.173, Training Accuracy= 1.000
Epoch: 18, Batch: 0, Loss= 4.069, Training Accuracy= 1.000
Epoch: 19, Batch: 0, Loss= 4.163, Training Accuracy= 1.000
Epoch: 20, Batch: 0, Loss= 3.650, Training Accuracy= 1.000
Epoch: 21, Batch: 0, Loss= 3.317, Training Accuracy= 1.000
Epoch: 22, Batch: 0, Loss= 4.195, Training Accuracy= 1.000
Epoch: 23, Batch: 0, Loss= 2.729, Training Accuracy= 1.000
Epoch: 24, Batch: 0, Loss= 2.448, Training Accuracy= 1.000
Epoch: 25, Batch: 0, Loss= 2.614, Training Accuracy= 1.000
Epoch: 26, Batch: 0, Loss= 2.424, Training Accuracy= 1.000
Epoch: 27, Batch: 0, Loss= 2.707, Training Accuracy= 1.000
Epoch: 28, Batch: 0, Loss= 2.072, Training Accuracy= 1.000
Epoch: 29, Batch: 0, Loss= 1.726, Training Accuracy= 1.000
Epoch: 30, Batch: 0, Loss= 1.701, Training Accuracy= 1.000
Epoch: 31, Batch: 0, Loss= 1.598, Training Accuracy= 1.000
Epoch: 32, Batch: 0, Loss= 1.381, Training Accuracy= 1.000
Epoch: 33, Batch: 0, Loss= 1.826, Training Accuracy= 1.000
Epoch: 34, Batch: 0, Loss= 1.227, Training Accuracy= 1.000
Epoch: 35, Batch: 0, Loss= 1.320, Training Accuracy= 1.000
Epoch: 36, Batch: 0, Loss= 1.110, Training Accuracy= 1.000
Epoch: 37, Batch: 0, Loss= 0.875, Training Accuracy= 1.000
Epoch: 38, Batch: 0, Loss= 1.214, Training Accuracy= 1.000
Epoch: 39, Batch: 0, Loss= 0.982, Training Accuracy= 1.000
Epoch: 40, Batch: 0, Loss= 0.982, Training Accuracy= 1.000
Epoch: 41, Batch: 0, Loss= 0.681, Training Accuracy= 1.000
Epoch: 42, Batch: 0, Loss= 0.839, Training Accuracy= 1.000
Epoch: 43, Batch: 0, Loss= 0.777, Training Accuracy= 1.000
Epoch: 44, Batch: 0, Loss= 0.671, Training Accuracy= 1.000
Epoch: 45, Batch: 0, Loss= 0.859, Training Accuracy= 1.000
Epoch: 46, Batch: 0, Loss= 0.529, Training Accuracy= 1.000
Epoch: 47, Batch: 0, Loss= 0.707, Training Accuracy= 1.000
Epoch: 48, Batch: 0, Loss= 0.491, Training Accuracy= 1.000
Epoch: 49, Batch: 0, Loss= 0.500, Training Accuracy= 1.000
Epoch: 50, Batch: 0, Loss= 0.415, Training Accuracy= 1.000
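One caveat with the hand-rolled loss above: -tf.reduce_sum(y * tf.log(pred)) yields NaN once the softmax saturates to exactly 0. A sketch of the numerically stable TF1-style alternative, assuming convolutional_neural_network is modified to return the pre-softmax logits instead of probabilities:
# in convolutional_neural_network, return the logits without tf.nn.softmax:
#     return linear_layer(h_fc1_drop, [1024, class_sum], [class_sum])
logits = convolutional_neural_network(x, keep_prob)
# softmax and cross-entropy fused into one numerically stable op
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)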
train_tensorboard.py
#!/usr/bin/python
# coding:utf8
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
def conv_pool_layer(data, weights_size, biases_size):
weights = tf.Variable(tf.truncated_normal(weights_size, stddev=0.1))
biases = tf.Variable(tf.constant(0.1, shape=biases_size))
conv2d = tf.nn.conv2d(data, weights, strides=[1, 1, 1, 1], padding='SAME')
relu = tf.nn.relu(conv2d + biases)
return tf.nn.max_pool(relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def linear_layer(data, weights_size, biases_size):
weights = tf.Variable(tf.truncated_normal(weights_size, stddev=0.1))
biases = tf.Variable(tf.constant(0.1, shape=biases_size))
return tf.add(tf.matmul(data, weights), biases)
def convolutional_neural_network(x, keep_prob):
with tf.name_scope('input'):
x_image = tf.reshape(x, [-1, 48, 48, 1])
with tf.name_scope('conv1'):
h_pool1 = conv_pool_layer(x_image, [5, 5, 1, 32], [32])
with tf.name_scope('conv2'):
h_pool2 = conv_pool_layer(h_pool1, [5, 5, 32, 64], [64])
h_pool2_flat = tf.reshape(h_pool2, [-1, 12 * 12 * 64])
with tf.name_scope('fc3'):
h_fc1 = tf.nn.relu(linear_layer(h_pool2_flat, [12 * 12 * 64, 1024], [1024]))
with tf.name_scope('dropout4'):
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
with tf.name_scope('softmax5'):
out = tf.nn.softmax(linear_layer(h_fc1_drop, [1024, class_sum], [class_sum]))
return out
def batch_data(x, y, batch, num):
ind = np.arange(num)
index = ind[batch * batch_size:(batch + 1) * batch_size]
batch_x = x[index, :]
batch_y = y[index, :]
return batch_x, batch_y
if __name__ == "__main__":
emotion = {0: 'Angry', 1: 'Disgust', 2: 'Fear', 3: 'Happy', 4: 'Sad', 5: 'Surprise', 6: 'Neutral'}
data = pd.read_csv(r'face.csv', dtype='a')
label = np.array(data['emotion'])
img_data = np.array(data['pixels'])
N_sample = label.size
Face_data = np.zeros((N_sample, 48 * 48))
Face_label = np.zeros((N_sample, 7), dtype=int)
for i in range(N_sample):
x = img_data[i]
x = np.fromstring(x, dtype=float, sep=' ')
x = x / x.max()
Face_data[i] = x
Face_label[i, int(label[i])] = 1
    # hyperparameters
    dropout = 0.5
    class_sum = 7
    # dropout mitigates overfitting
    keep_prob = tf.placeholder(tf.float32)
    x = tf.placeholder(tf.float32, [None, 48 * 48])
    y = tf.placeholder(tf.float32, [None, class_sum])
    pred = convolutional_neural_network(x, keep_prob)
    # first 200 samples for training, remaining 13 for testing
train_num = 200
test_num = 13
train_x = Face_data[0:train_num, :]
train_y = Face_label[0:train_num, :]
test_x = Face_data[train_num: train_num + test_num, :]
test_y = Face_label[train_num: train_num + test_num, :]
batch_size = 20
train_batch_num = int(train_num / batch_size)
test_batch_num = test_num / batch_size
    # train and evaluate the model
with tf.name_scope('cross_entropy'):
cross_entropy = -tf.reduce_sum(y * tf.log(pred))
tf.summary.histogram("cross_entropy", cross_entropy)
# tf.summary.scalar("cross_entropy", cross_entropy)
with tf.name_scope('minimize'):
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
with tf.name_scope('accuracy'):
with tf.name_scope('correct_pred'):
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
with tf.name_scope('accuracy'):
accuracy = tf.reduce_mean(tf.cast(correct_pred, "float"))
tf.summary.histogram("accuracy", accuracy)
        # emit a summary protocol buffer containing a single scalar value
tf.summary.scalar('accuracy', accuracy)
total_train_loss = []
total_train_acc = []
total_test_loss = []
total_test_acc = []
train_epoch = 50
    # merge all summaries collected in the default graph
merged = tf.summary.merge_all()
with tf.Session() as sess:
writer = tf.summary.FileWriter("tmp/logs", sess.graph)
sess.run(tf.global_variables_initializer())
for epoch in range(0, train_epoch):
Total_train_loss = 0
Total_train_acc = 0
for train_batch in range(0, train_batch_num):
batch_x, batch_y = batch_data(train_x, train_y, train_batch, train_num)
                # optimization step; also evaluate the merged summaries for TensorBoard
summary, _ = sess.run([merged, train_step], feed_dict={x: batch_x, y: batch_y, keep_prob: dropout})
                # use a globally increasing step so each epoch's points don't overwrite the last
                writer.add_summary(summary, epoch * train_batch_num + train_batch)
if train_batch % batch_size == 0:
                    # compute loss and accuracy
loss, acc = sess.run([cross_entropy, accuracy], feed_dict={x: batch_x, y: batch_y, keep_prob: 1.})
print("Epoch: " + str(epoch + 1) + ", Batch: " + str(train_batch) +
", Loss= " + "{:.3f}".format(loss) +
", Training Accuracy= " + "{:.3f}".format(acc))
Total_train_loss = Total_train_loss + loss
Total_train_acc = Total_train_acc + acc
total_train_loss.append(Total_train_loss)
total_train_acc.append(Total_train_acc)
writer.close()
plt.subplot(2, 1, 1)
plt.ylabel('Train loss')
plt.plot(total_train_loss, 'r')
plt.subplot(2, 1, 2)
plt.ylabel('Train accuracy')
plt.plot(total_train_acc, 'r')
plt.savefig("face_loss_acc.png")
plt.show()
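After a run, the event files land in tmp/logs; launch TensorBoard with `tensorboard --logdir=tmp/logs` and open the printed URL (http://localhost:6006 by default) to inspect the graph plus the accuracy and cross_entropy summaries recorded above.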