Below is the main entry point of the classification program (the training script):

#! /usr/bin/env python

import tensorflow as tf
import numpy as np
import os
import time
import datetime
import data_helpers
from text_cnn import TextCNN
from tensorflow.contrib import learn

# Parameters
# ==================================================

# Data loading params
tf.flags.DEFINE_float("dev_sample_percentage", .1, "Percentage of the training data to use for validation")
tf.flags.DEFINE_string("positive_data_file", "./data/rt-polaritydata/rt-polarity.pos", "Data source for the positive data.")
tf.flags.DEFINE_string("negative_data_file", "./data/rt-polaritydata/rt-polarity.neg", "Data source for the negative data.")

# Model Hyperparameters
tf.flags.DEFINE_integer("embedding_dim", 128, "Dimensionality of character embedding (default: 128)")
tf.flags.DEFINE_string("filter_sizes", "3,4,5", "Comma-separated filter sizes (default: '3,4,5')")
tf.flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)")
tf.flags.DEFINE_float("dropout_keep_prob", 0.5, "Dropout keep probability (default: 0.5)")
tf.flags.DEFINE_float("l2_reg_lambda", 0.0, "L2 regularization lambda (default: 0.0)")

# Training parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_integer("num_epochs", 200, "Number of training epochs (default: 200)")
tf.flags.DEFINE_integer("evaluate_every", 100, "Evaluate model on dev set after this many steps (default: 100)")
tf.flags.DEFINE_integer("checkpoint_every", 100, "Save model after this many steps (default: 100)")
tf.flags.DEFINE_integer("num_checkpoints", 5, "Number of checkpoints to store (default: 5)")

# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")

FLAGS = tf.flags.FLAGS
# FLAGS._parse_flags()
# print("\nParameters:")
# for attr, value in sorted(FLAGS.__flags.items()):
#     print("{}={}".format(attr.upper(), value))
# print("")

# Data Preparation
# ==================================================

# Load data
print("Loading data...")
x_text, y = data_helpers.load_data_and_labels(FLAGS.positive_data_file, FLAGS.negative_data_file)

# Build vocabulary
max_document_length = max([len(x.split(" ")) for x in x_text])
vocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length)
x = np.array(list(vocab_processor.fit_transform(x_text)))

# Randomly shuffle data
np.random.seed(10)
shuffle_indices = np.random.permutation(np.arange(len(y)))
x_shuffled = x[shuffle_indices]
y_shuffled = y[shuffle_indices]

# Split train/test set
# TODO: This is very crude, should use cross-validation
dev_sample_index = -1 * int(FLAGS.dev_sample_percentage * float(len(y)))
x_train, x_dev = x_shuffled[:dev_sample_index], x_shuffled[dev_sample_index:]
y_train, y_dev = y_shuffled[:dev_sample_index], y_shuffled[dev_sample_index:]
del x, y, x_shuffled, y_shuffled

print("Vocabulary Size: {:d}".format(len(vocab_processor.vocabulary_)))
print("Train/Dev split: {:d}/{:d}".format(len(y_train), len(y_dev)))

# Training
# ==================================================

with tf.Graph().as_default():
    session_conf = tf.ConfigProto(
        allow_soft_placement=FLAGS.allow_soft_placement,
        log_device_placement=FLAGS.log_device_placement)
    sess = tf.Session(config=session_conf)
    with sess.as_default():
        cnn = TextCNN(
            sequence_length=x_train.shape[1],
            num_classes=y_train.shape[1],
            vocab_size=len(vocab_processor.vocabulary_),
            embedding_size=FLAGS.embedding_dim,
            filter_sizes=list(map(int, FLAGS.filter_sizes.split(","))),
            num_filters=FLAGS.num_filters,
            l2_reg_lambda=FLAGS.l2_reg_lambda)

        # Define Training procedure
        global_step = tf.Variable(0, name="global_step", trainable=False)
        optimizer = tf.train.AdamOptimizer(1e-3)
        grads_and_vars = optimizer.compute_gradients(cnn.loss)
        train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)

        # Keep track of gradient values and sparsity (optional)
        grad_summaries = []
        for g, v in grads_and_vars:
            if g is not None:
                grad_hist_summary = tf.summary.histogram("{}/grad/hist".format(v.name), g)
                sparsity_summary = tf.summary.scalar("{}/grad/sparsity".format(v.name), tf.nn.zero_fraction(g))
                grad_summaries.append(grad_hist_summary)
                grad_summaries.append(sparsity_summary)
        grad_summaries_merged = tf.summary.merge(grad_summaries)

        # Output directory for models and summaries
        timestamp = str(int(time.time()))
        out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", timestamp))
        print("Writing to {}\n".format(out_dir))

        # Summaries for loss and accuracy
        loss_summary = tf.summary.scalar("loss", cnn.loss)
        acc_summary = tf.summary.scalar("accuracy", cnn.accuracy)

        # Train Summaries
        train_summary_op = tf.summary.merge([loss_summary, acc_summary, grad_summaries_merged])
        train_summary_dir = os.path.join(out_dir, "summaries", "train")
        train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)

        # Dev summaries
        dev_summary_op = tf.summary.merge([loss_summary, acc_summary])
        dev_summary_dir = os.path.join(out_dir, "summaries", "dev")
        dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, sess.graph)

        # Checkpoint directory. TensorFlow assumes this directory already exists, so we need to create it
        checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
        checkpoint_prefix = os.path.join(checkpoint_dir, "model")
        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=FLAGS.num_checkpoints)

        # Write vocabulary
        vocab_processor.save(os.path.join(out_dir, "vocab"))

        # Initialize all variables
        sess.run(tf.global_variables_initializer())

        def train_step(x_batch, y_batch):
            """
            A single training step
            """
            feed_dict = {
                cnn.input_x: x_batch,
                cnn.input_y: y_batch,
                cnn.dropout_keep_prob: FLAGS.dropout_keep_prob
            }
            _, step, summaries, loss, accuracy = sess.run(
                [train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy],
                feed_dict)
            time_str = datetime.datetime.now().isoformat()
            print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))
            train_summary_writer.add_summary(summaries, step)

        def dev_step(x_batch, y_batch, writer=None):
            """
            Evaluates model on a dev set
            """
            feed_dict = {
                cnn.input_x: x_batch,
                cnn.input_y: y_batch,
                cnn.dropout_keep_prob: 1.0
            }
            step, summaries, loss, accuracy = sess.run(
                [global_step, dev_summary_op, cnn.loss, cnn.accuracy],
                feed_dict)
            time_str = datetime.datetime.now().isoformat()
            print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))
            if writer:
                writer.add_summary(summaries, step)

        # Generate batches
        batches = data_helpers.batch_iter(
            list(zip(x_train, y_train)), FLAGS.batch_size, FLAGS.num_epochs)

        # Training loop. For each batch...
        for batch in batches:
            x_batch, y_batch = zip(*batch)
            train_step(x_batch, y_batch)
            current_step = tf.train.global_step(sess, global_step)
            if current_step % FLAGS.evaluate_every == 0:
                print("\nEvaluation:")
                dev_step(x_dev, y_dev, writer=dev_summary_writer)
                print("")
            if current_step % FLAGS.checkpoint_every == 0:
                path = saver.save(sess, checkpoint_prefix, global_step=current_step)
                print("Saved model checkpoint to {}\n".format(path))

Below is the graph-construction code for the TextCNN model:

import tensorflow as tf
import numpy as np


class TextCNN(object):
    """
    A CNN for text classification.
    Uses an embedding layer, followed by a convolutional, max-pooling and softmax layer.
    """
    def __init__(
            self, sequence_length, num_classes, vocab_size,
            embedding_size, filter_sizes, num_filters, l2_reg_lambda=0.0):

        # Placeholders for input, output and dropout
        self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name="input_x")
        self.input_y = tf.placeholder(tf.float32, [None, num_classes], name="input_y")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

        # Keeping track of l2 regularization loss (optional)
        l2_loss = tf.constant(0.0)

        # Embedding layer
        with tf.device('/cpu:0'), tf.name_scope("embedding"):
            self.W = tf.Variable(
                tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
                name="W")
            self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)
            self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)

        # Create a convolution + maxpool layer for each filter size
        pooled_outputs = []
        for i, filter_size in enumerate(filter_sizes):
            with tf.name_scope("conv-maxpool-%s" % filter_size):
                # Convolution Layer
                filter_shape = [filter_size, embedding_size, 1, num_filters]
                W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
                b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name="b")
                conv = tf.nn.conv2d(
                    self.embedded_chars_expanded,
                    W,
                    strides=[1, 1, 1, 1],
                    padding="VALID",
                    name="conv")
                # Apply nonlinearity
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
                # Maxpooling over the outputs
                pooled = tf.nn.max_pool(
                    h,
                    ksize=[1, sequence_length - filter_size + 1, 1, 1],
                    strides=[1, 1, 1, 1],
                    padding='VALID',
                    name="pool")
                pooled_outputs.append(pooled)

        # Combine all the pooled features
        num_filters_total = num_filters * len(filter_sizes)
        self.h_pool = tf.concat(pooled_outputs, 3)
        self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])

        # Add dropout
        with tf.name_scope("dropout"):
            self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)

        # Final (unnormalized) scores and predictions
        with tf.name_scope("output"):
            W = tf.get_variable(
                "W",
                shape=[num_filters_total, num_classes],
                initializer=tf.contrib.layers.xavier_initializer())
            b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b")
            l2_loss += tf.nn.l2_loss(W)
            l2_loss += tf.nn.l2_loss(b)
            self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name="scores")
            self.predictions = tf.argmax(self.scores, 1, name="predictions")

        # Calculate mean cross-entropy loss
        with tf.name_scope("loss"):
            losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)
            self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss

        # Accuracy
        with tf.name_scope("accuracy"):
            correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
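To sanity-check the graph construction, a tiny smoke test like the following can be run against the class above (dummy shapes and values only; this snippet is an illustration, not part of the original post):

import numpy as np
import tensorflow as tf
from text_cnn import TextCNN

# Build the graph with small dummy dimensions and run one forward pass.
with tf.Graph().as_default(), tf.Session() as sess:
    cnn = TextCNN(sequence_length=56, num_classes=2, vocab_size=1000,
                  embedding_size=128, filter_sizes=[3, 4, 5], num_filters=128)
    sess.run(tf.global_variables_initializer())
    dummy_x = np.zeros((4, 56), dtype=np.int32)            # 4 padded "sentences" of word ids
    dummy_y = np.array([[0, 1]] * 4, dtype=np.float32)     # one-hot labels
    loss, acc = sess.run([cnn.loss, cnn.accuracy],
                         feed_dict={cnn.input_x: dummy_x,
                                    cnn.input_y: dummy_y,
                                    cnn.dropout_keep_prob: 1.0})
    print("loss {:g}, acc {:g}".format(loss, acc))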

Below is the code that reads the text files (data_helpers):

import numpy as np
import re
import itertools
from collections import Counter


def clean_str(string):
    """
    Tokenization/string cleaning for all datasets except for SST.
    Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
    """
    string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
    string = re.sub(r"\'s", " \'s", string)
    string = re.sub(r"\'ve", " \'ve", string)
    string = re.sub(r"n\'t", " n\'t", string)
    string = re.sub(r"\'re", " \'re", string)
    string = re.sub(r"\'d", " \'d", string)
    string = re.sub(r"\'ll", " \'ll", string)
    string = re.sub(r",", " , ", string)
    string = re.sub(r"!", " ! ", string)
    string = re.sub(r"\(", " \( ", string)
    string = re.sub(r"\)", " \) ", string)
    string = re.sub(r"\?", " \? ", string)
    string = re.sub(r"\s{2,}", " ", string)
    return string.strip().lower()


def load_data_and_labels(positive_data_file, negative_data_file):
    """
    Loads MR polarity data from files, splits the data into words and generates labels.
    Returns split sentences and labels.
    """
    # Load data from files
    positive_examples = list(open(positive_data_file, "r", encoding='UTF-8').readlines())
    positive_examples = [s.strip() for s in positive_examples]
    negative_examples = list(open(negative_data_file, "r", encoding='UTF-8').readlines())
    negative_examples = [s.strip() for s in negative_examples]
    # Split by words
    x_text = positive_examples + negative_examples
    x_text = [clean_str(sent) for sent in x_text]
    # Generate labels
    positive_labels = [[0, 1] for _ in positive_examples]
    negative_labels = [[1, 0] for _ in negative_examples]
    y = np.concatenate([positive_labels, negative_labels], 0)
    return [x_text, y]


def batch_iter(data, batch_size, num_epochs, shuffle=True):
    """
    Generates a batch iterator for a dataset.
    """
    data = np.array(data)
    data_size = len(data)
    num_batches_per_epoch = int((len(data) - 1) / batch_size) + 1
    for epoch in range(num_epochs):
        # Shuffle the data at each epoch
        if shuffle:
            shuffle_indices = np.random.permutation(np.arange(data_size))
            shuffled_data = data[shuffle_indices]
        else:
            shuffled_data = data
        for batch_num in range(num_batches_per_epoch):
            start_index = batch_num * batch_size
            end_index = min((batch_num + 1) * batch_size, data_size)
            yield shuffled_data[start_index:end_index]
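A quick way to see what these helpers do is the toy usage below (illustration only, not part of the original post; it assumes the lines are appended to data_helpers.py or run after importing it):

# Toy usage of the helpers above (toy input, not the MR corpus).
print(clean_str("I can't believe it's good!"))
# -> i ca n't believe it 's good !

toy_data = list(zip(range(10), range(10)))          # 10 (feature, label) pairs
for i, batch in enumerate(batch_iter(toy_data, batch_size=4, num_epochs=1, shuffle=False)):
    print(i, batch.shape)                           # (4, 2), (4, 2), (2, 2)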

Below is the log from the training run:

Loading data...
Vocabulary Size: 18758
Train/Dev split: 9596/1066
2018-02-17 22:22:49.278753: I C:\tf_jenkins\workspace\rel-win\M\windows\PY\35\tensorflow\core\platform\cpu_feature_guard.cc:137] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX AVX2
WARNING:tensorflow:From C:\Users\95890\eclipse-workspace\cnn-text-classification\text_cnn.py:79: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.
Instructions for updating: Future major versions of TensorFlow will allow gradients to flow
into the labels input on backprop by default. See tf.nn.softmax_cross_entropy_with_logits_v2.

Writing to C:\Users\95890\eclipse-workspace\cnn-text-classification\runs\1518877370

2018-02-17T22:22:52.557895: step 1, loss 1.93882, acc 0.546875
2018-02-17T22:22:52.819592: step 2, loss 1.29348, acc 0.625
2018-02-17T22:22:53.237704: step 3, loss 1.63719, acc 0.625
2018-02-17T22:22:53.538504: step 4, loss 1.62667, acc 0.546875
2018-02-17T22:22:53.821257: step 5, loss 1.53661, acc 0.5
2018-02-17T22:22:54.090973: step 6, loss 1.49735, acc 0.59375
2018-02-17T22:22:54.362696: step 7, loss 1.78985, acc 0.515625
2018-02-17T22:22:54.645830: step 8, loss 1.83429, acc 0.484375
2018-02-17T22:22:54.904520: step 9, loss 1.55188, acc 0.546875
2018-02-17T22:22:55.164209: step 10, loss 1.57634, acc 0.5625
2018-02-17T22:22:55.422403: step 11, loss 1.28381, acc 0.609375
2018-02-17T22:22:55.667056: step 12, loss 1.76712, acc 0.484375
2018-02-17T22:22:55.933764: step 13, loss 1.70237, acc 0.515625
2018-02-17T22:22:56.181057: step 14, loss 1.41725, acc 0.578125
2018-02-17T22:22:56.426710: step 15, loss 1.73679, acc 0.515625
2018-02-17T22:22:56.673366: step 16, loss 1.70436, acc 0.53125
2018-02-17T22:22:56.923032: step 17, loss 1.47013, acc 0.546875
2018-02-17T22:22:57.174700: step 18, loss 1.72825, acc 0.515625
2018-02-17T22:22:57.422359: step 19, loss 1.73737, acc 0.578125
2018-02-17T22:22:57.671020: step 20, loss 1.95546, acc 0.4375
2018-02-17T22:22:57.952771: step 21, loss 2.3615, acc 0.5625
2018-02-17T22:22:58.263596: step 22, loss 1.7885, acc 0.546875
2018-02-17T22:22:58.571416: step 23, loss 1.74313, acc 0.515625
2018-02-17T22:22:59.029634: step 24, loss 2.38787, acc 0.40625
2018-02-17T22:22:59.281304: step 25, loss 1.2678, acc 0.546875
2018-02-17T22:22:59.529785: step 26, loss 2.15869, acc 0.46875
2018-02-17T22:22:59.776442: step 27, loss 1.63151, acc 0.546875
2018-02-17T22:23:00.025102: step 28, loss 2.05271, acc 0.40625
2018-02-17T22:23:00.288803: step 29, loss 1.44182, acc 0.53125
2018-02-17T22:23:00.546492: step 30, loss 1.87917, acc 0.40625
2018-02-17T22:23:00.885396: step 31, loss 1.58246, acc 0.5625
2018-02-17T22:23:01.131048: step 32, loss 1.6608, acc 0.578125
2018-02-17T22:23:01.376702: step 33, loss 1.8353, acc 0.484375
2018-02-17T22:23:01.617341: step 34, loss 1.51423, acc 0.546875
2018-02-17T22:23:01.866003: step 35, loss 1.82324, acc 0.40625
2018-02-17T22:23:02.109650: step 36, loss 1.46072, acc 0.625
2018-02-17T22:23:02.347282: step 37, loss 1.36666, acc 0.5625
2018-02-17T22:23:02.659112: step 38, loss 1.51059, acc 0.515625
2018-02-17T22:23:02.911785: step 39, loss 2.02954, acc 0.40625
2018-02-17T22:23:03.163454: step 40, loss 1.80641, acc 0.578125
2018-02-17T22:23:03.430164: step 41, loss 1.26004, acc 0.578125
2018-02-17T22:23:03.697875: step 42, loss 1.16646, acc 0.5625
2018-02-17T22:23:03.982632: step 43, loss 1.9329, acc 0.515625
2018-02-17T22:23:04.222269: step 44, loss 1.82429, acc 0.5625
2018-02-17T22:23:04.468926: step 45, loss 1.81926, acc 0.484375
2018-02-17T22:23:04.707560: step 46, loss 1.64037, acc 0.515625
2018-02-17T22:23:04.948200: step 47, loss 1.18086, acc 0.609375
2018-02-17T22:23:05.188841: step 48, loss 1.56999, acc 0.5
2018-02-17T22:23:05.454546: step 49, loss 1.7083, acc 0.578125
2018-02-17T22:23:05.712232: step 50, loss 1.44188, acc 0.546875
2018-02-17T22:23:06.055145: step 51, loss 1.36105, acc 0.484375
2018-02-17T22:23:06.293779: step 52, loss 1.67877, acc 0.453125
2018-02-17T22:23:06.530408: step 53, loss 1.45848, acc 0.5625
2018-02-17T22:23:06.781075: step 54, loss 1.70875, acc 0.484375
2018-02-17T22:23:07.036755: step 55, loss 1.28958, acc 0.59375
2018-02-17T22:23:07.286419: step 56, loss 1.48491, acc 0.53125
2018-02-17T22:23:07.525054: step 57, loss 1.47569, acc 0.46875
2018-02-17T22:23:07.796776: step 58, loss 0.863633, acc 0.65625
2018-02-17T22:23:08.037417: step 59, loss 1.93698, acc 0.53125
2018-02-17T22:23:08.311145: step 60, loss 1.85745, acc 0.453125
2018-02-17T22:23:08.551785: step 61, loss 1.27294, acc 0.578125
2018-02-17T22:23:08.865619: step 62, loss 1.86038, acc 0.453125
2018-02-17T22:23:09.137343: step 63, loss 1.32204, acc 0.53125
2018-02-17T22:23:09.370964: step 64, loss 1.31815, acc 0.578125
2018-02-17T22:23:09.615614: step 65, loss 2.00979, acc 0.4375
2018-02-17T22:23:09.861268: step 66, loss 1.78128, acc 0.53125
2018-02-17T22:23:10.093888: step 67, loss 2.13612, acc 0.484375
2018-02-17T22:23:10.340542: step 68, loss 1.58541, acc 0.484375
2018-02-17T22:23:10.579177: step 69, loss 1.20751, acc 0.546875
2018-02-17T22:23:10.811797: step 70, loss 1.41241, acc 0.515625
2018-02-17T22:23:11.053439: step 71, loss 1.54048, acc 0.53125
2018-02-17T22:23:11.293076: step 72, loss 1.67773, acc 0.484375
2018-02-17T22:23:11.535721: step 73, loss 1.64786, acc 0.53125
2018-02-17T22:23:11.781375: step 74, loss 1.24305, acc 0.453125
2018-02-17T22:23:12.014997: step 75, loss 1.34629, acc 0.484375
2018-02-17T22:23:12.249620: step 76, loss 1.34395, acc 0.59375
2018-02-17T22:23:12.483242: step 77, loss 1.27004, acc 0.5
2018-02-17T22:23:12.727892: step 78, loss 1.26064, acc 0.59375
2018-02-17T22:23:12.963519: step 79, loss 1.30957, acc 0.515625
2018-02-17T22:23:13.205162: step 80, loss 1.44296, acc 0.5
2018-02-17T22:23:13.482900: step 81, loss 1.1388, acc 0.640625
2018-02-17T22:23:13.767658: step 82, loss 1.32895, acc 0.515625
2018-02-17T22:23:14.032362: step 83, loss 1.96213, acc 0.4375
2018-02-17T22:23:14.289045: step 84, loss 1.25613, acc 0.515625
2018-02-17T22:23:14.531690: step 85, loss 1.79974, acc 0.40625
2018-02-17T22:23:14.785365: step 86, loss 1.15007, acc 0.53125
2018-02-17T22:23:15.025003: step 87, loss 0.934746, acc 0.625
2018-02-17T22:23:15.266648: step 88, loss 1.54477, acc 0.46875
2018-02-17T22:23:15.525333: step 89, loss 1.33321, acc 0.5625
2018-02-17T22:23:15.838165: step 90, loss 1.04884, acc 0.59375
2018-02-17T22:23:16.115904: step 91, loss 1.16065, acc 0.59375
2018-02-17T22:23:16.364567: step 92, loss 1.37733, acc 0.578125
2018-02-17T22:23:16.600192: step 93, loss 1.20027, acc 0.5625
2018-02-17T22:23:16.845845: step 94, loss 1.04309, acc 0.625
2018-02-17T22:23:17.085482: step 95, loss 1.51981, acc 0.46875
2018-02-17T22:23:17.327125: step 96, loss 0.903045, acc 0.625
2018-02-17T22:23:17.566763: step 97, loss 1.25021, acc 0.5
2018-02-17T22:23:17.815425: step 98, loss 1.36158, acc 0.46875
2018-02-17T22:23:18.064091: step 99, loss 1.35057, acc 0.53125
2018-02-17T22:23:18.301718: step 100, loss 1.50104, acc 0.4375

Evaluation:
2018-02-17T22:23:19.117889: step 100, loss 1.08254, acc 0.527205

Saved model checkpoint to C:\Users\95890\eclipse-workspace\cnn-text-classification\runs\1518877370\checkpoints\model-100

2018-02-17T22:23:20.139641: step 101, loss 1.24486, acc 0.578125
2018-02-17T22:23:20.389306: step 102, loss 1.33139, acc 0.46875
2018-02-17T22:23:20.635962: step 103, loss 1.6479, acc 0.5625
2018-02-17T22:23:20.882618: step 104, loss 1.24139, acc 0.53125
2018-02-17T22:23:21.135291: step 105, loss 1.28177, acc 0.515625
2018-02-17T22:23:21.383951: step 106, loss 1.45685, acc 0.484375
2018-02-17T22:23:21.628602: step 107, loss 0.931523, acc 0.625
2018-02-17T22:23:21.884281: step 108, loss 1.15422, acc 0.484375
2018-02-17T22:23:22.127931: step 109, loss 1.57511, acc 0.5
2018-02-17T22:23:22.368570: step 110, loss 1.14225, acc 0.59375
2018-02-17T22:23:22.619237: step 111, loss 1.43202, acc 0.546875
2018-02-17T22:23:22.859877: step 112, loss 1.0204, acc 0.609375
2018-02-17T22:23:23.104529: step 113, loss 1.25905, acc 0.5625
2018-02-17T22:23:23.350181: step 114, loss 1.15094, acc 0.53125
2018-02-17T22:23:23.617893: step 115, loss 1.17378, acc 0.515625
2018-02-17T22:23:23.865552: step 116, loss 1.20742, acc 0.578125
2018-02-17T22:23:24.103183: step 117, loss 1.61458, acc 0.53125
2018-02-17T22:23:24.352848: step 118, loss 1.60123, acc 0.484375
2018-02-17T22:23:24.622377: step 119, loss 1.59381, acc 0.4375
2018-02-17T22:23:24.875051: step 120, loss 1.1111, acc 0.53125
2018-02-17T22:23:25.119701: step 121, loss 1.43676, acc 0.515625
2018-02-17T22:23:25.373375: step 122, loss 1.07452, acc 0.65625
2018-02-17T22:23:25.624042: step 123, loss 1.39992, acc 0.46875
2018-02-17T22:23:25.890753: step 124, loss 1.2318, acc 0.484375
2018-02-17T22:23:26.130390: step 125, loss 1.58263, acc 0.46875
2018-02-17T22:23:26.380053: step 126, loss 1.35279, acc 0.453125
2018-02-17T22:23:26.630720: step 127, loss 1.21636, acc 0.578125
2018-02-17T22:23:26.875370: step 128, loss 1.11638, acc 0.5625
2018-02-17T22:23:27.115007: step 129, loss 1.28373, acc 0.5625
2018-02-17T22:23:27.361667: step 130, loss 1.06153, acc 0.609375
2018-02-17T22:23:27.609323: step 131, loss 0.93121, acc 0.65625
2018-02-17T22:23:27.862999: step 132, loss 1.04303, acc 0.515625
2018-02-17T22:23:28.107649: step 133, loss 1.1509, acc 0.546875
2018-02-17T22:23:28.358314: step 134, loss 1.36578, acc 0.53125
2018-02-17T22:23:28.598955: step 135, loss 1.00832, acc 0.59375
2018-02-17T22:23:28.847617: step 136, loss 1.35821, acc 0.5
2018-02-17T22:23:29.097280: step 137, loss 1.29283, acc 0.5
2018-02-17T22:23:29.338922: step 138, loss 1.19305, acc 0.515625
2018-02-17T22:23:29.597612: step 139, loss 1.00896, acc 0.53125
2018-02-17T22:23:29.846791: step 140, loss 0.963936, acc 0.515625
2018-02-17T22:23:30.095453: step 141, loss 1.19448, acc 0.5625
2018-02-17T22:23:30.338098: step 142, loss 1.04261, acc 0.5625
2018-02-17T22:23:30.588765: step 143, loss 1.12989, acc 0.578125
2018-02-17T22:23:30.840434: step 144, loss 1.10961, acc 0.5625
2018-02-17T22:23:31.083080: step 145, loss 1.25327, acc 0.609375
2018-02-17T22:23:31.332744: step 146, loss 0.740114, acc 0.625
2018-02-17T22:23:31.584413: step 147, loss 1.27842, acc 0.5
2018-02-17T22:23:31.835081: step 148, loss 1.14313, acc 0.578125
2018-02-17T22:23:32.072712: step 149, loss 1.25194, acc 0.546875
2018-02-17T22:23:32.309341: step 150, loss 0.931943, acc 0.65
2018-02-17T22:23:32.557006: step 151, loss 0.822591, acc 0.671875
2018-02-17T22:23:32.803656: step 152, loss 0.86916, acc 0.640625
2018-02-17T22:23:33.050314: step 153, loss 0.834351, acc 0.65625
2018-02-17T22:23:33.293960: step 154, loss 0.897595, acc 0.671875
2018-02-17T22:23:33.544628: step 155, loss 1.08369, acc 0.59375
2018-02-17T22:23:33.809331: step 156, loss 0.694065, acc 0.671875
2018-02-17T22:23:34.055987: step 157, loss 1.03324, acc 0.640625
2018-02-17T22:23:34.305651: step 158, loss 0.775803, acc 0.65625
2018-02-17T22:23:34.572362: step 159, loss 0.633237, acc 0.671875
2018-02-17T22:23:34.883188: step 160, loss 1.1404, acc 0.609375
2018-02-17T22:23:35.203038: step 161, loss 0.604739, acc 0.734375
2018-02-17T22:23:35.455710: step 162, loss 1.04967, acc 0.578125
2018-02-17T22:23:35.702366: step 163, loss 0.665491, acc 0.734375
2018-02-17T22:23:35.973087: step 164, loss 0.981186, acc 0.625
2018-02-17T22:23:36.210718: step 165, loss 1.07057, acc 0.5625
2018-02-17T22:23:36.461386: step 166, loss 0.807863, acc 0.640625
2018-02-17T22:23:36.706036: step 167, loss 0.95945, acc 0.53125
2018-02-17T22:23:36.952692: step 168, loss 0.906611, acc 0.6875
2018-02-17T22:23:37.197029: step 169, loss 0.990737, acc 0.546875
2018-02-17T22:23:37.438671: step 170, loss 1.12736, acc 0.5625
2018-02-17T22:23:37.687333: step 171, loss 0.941747, acc 0.59375
2018-02-17T22:23:37.938515: step 172, loss 0.768393, acc 0.640625
2018-02-17T22:23:38.188180: step 173, loss 1.21304, acc 0.53125
2018-02-17T22:23:38.428820: step 174, loss 0.803382, acc 0.59375
2018-02-17T22:23:38.666452: step 175, loss 1.00429, acc 0.578125
2018-02-17T22:23:38.915114: step 176, loss 0.806889, acc 0.671875
2018-02-17T22:23:39.160767: step 177, loss 1.02119, acc 0.578125
2018-02-17T22:23:39.406420: step 178, loss 0.778823, acc 0.65625
2018-02-17T22:23:39.658093: step 179, loss 0.488645, acc 0.75
2018-02-17T22:23:39.906751: step 180, loss 0.863101, acc 0.5625
2018-02-17T22:23:40.158422: step 181, loss 0.959462, acc 0.546875
2018-02-17T22:23:40.415104: step 182, loss 0.996491, acc 0.625
2018-02-17T22:23:40.660756: step 183, loss 1.02743, acc 0.46875
2018-02-17T22:23:40.910419: step 184, loss 0.862092, acc 0.625
2018-02-17T22:23:41.153066: step 185, loss 0.844177, acc 0.59375
2018-02-17T22:23:41.403733: step 186, loss 0.718054, acc 0.6875
2018-02-17T22:23:41.660417: step 187, loss 0.797246, acc 0.640625
2018-02-17T22:23:42.007339: step 188, loss 0.751253, acc 0.5625
2018-02-17T22:23:42.265023: step 189, loss 0.928972, acc 0.625
2018-02-17T22:23:42.515689: step 190, loss 1.03645, acc 0.515625
2018-02-17T22:23:42.809471: step 191, loss 0.845611, acc 0.609375
2018-02-17T22:23:43.094229: step 192, loss 0.708199, acc 0.6875
2018-02-17T22:23:43.335871: step 193, loss 0.874412, acc 0.65625
2018-02-17T22:23:43.603584: step 194, loss 1.04348, acc 0.59375
2018-02-17T22:23:43.849236: step 195, loss 0.997601, acc 0.578125
2018-02-17T22:23:44.088876: step 196, loss 0.97859, acc 0.625
2018-02-17T22:23:44.334529: step 197, loss 0.901213, acc 0.5625
2018-02-17T22:23:44.576173: step 198, loss 0.854954, acc 0.578125
2018-02-17T22:23:44.826261: step 199, loss 0.709572, acc 0.625
2018-02-17T22:23:45.072916: step 200, loss 0.970425, acc 0.625

Evaluation:
2018-02-17T22:23:45.900118: step 200, loss 0.658416, acc 0.610694

Saved model checkpoint to C:\Users\95890\eclipse-workspace\cnn-text-classification\runs\1518877370\checkpoints\model-200

2018-02-17T22:23:47.020216: step 201, loss 0.672312, acc 0.703125
2018-02-17T22:23:47.268878: step 202, loss 0.610235, acc 0.671875
2018-02-17T22:23:47.525559: step 203, loss 1.01205, acc 0.453125
2018-02-17T22:23:47.773220: step 204, loss 0.918158, acc 0.5625
2018-02-17T22:23:48.017869: step 205, loss 0.791164, acc 0.625
2018-02-17T22:23:48.272546: step 206, loss 0.75422, acc 0.59375
2018-02-17T22:23:48.525219: step 207, loss 0.774777, acc 0.65625
2018-02-17T22:23:48.767863: step 208, loss 0.756552, acc 0.65625
2018-02-17T22:23:49.014520: step 209, loss 0.920167, acc 0.671875
2018-02-17T22:23:49.258167: step 210, loss 0.863997, acc 0.59375
2018-02-17T22:23:49.497805: step 211, loss 0.762973, acc 0.640625
2018-02-17T22:23:49.749475: step 212, loss 0.728581, acc 0.640625
2018-02-17T22:23:50.024206: step 213, loss 1.00944, acc 0.46875
2018-02-17T22:23:50.266851: step 214, loss 0.784562, acc 0.65625
2018-02-17T22:23:50.510500: step 215, loss 0.665827, acc 0.671875
2018-02-17T22:23:50.750136: step 216, loss 0.790275, acc 0.625
2018-02-17T22:23:50.992782: step 217, loss 1.11809, acc 0.53125
2018-02-17T22:23:51.240440: step 218, loss 0.874787, acc 0.578125
2018-02-17T22:23:51.483085: step 219, loss 0.881097, acc 0.59375
2018-02-17T22:23:51.732750: step 220, loss 0.708284, acc 0.65625
2018-02-17T22:23:51.978403: step 221, loss 0.58689, acc 0.71875
2018-02-17T22:23:52.218040: step 222, loss 0.651873, acc 0.671875
2018-02-17T22:23:52.467687: step 223, loss 0.875721, acc 0.5625
2018-02-17T22:23:52.721361: step 224, loss 1.06595, acc 0.546875
2018-02-17T22:23:52.970025: step 225, loss 0.864003, acc 0.578125
2018-02-17T22:23:53.216680: step 226, loss 0.790093, acc 0.65625
2018-02-17T22:23:53.460327: step 227, loss 0.672423, acc 0.734375
2018-02-17T22:23:53.736060: step 228, loss 0.892856, acc 0.640625
2018-02-17T22:23:53.989735: step 229, loss 0.970857, acc 0.515625
2018-02-17T22:23:54.259453: step 230, loss 0.692266, acc 0.671875
2018-02-17T22:23:54.499089: step 231, loss 0.9676, acc 0.546875
2018-02-17T22:23:54.738727: step 232, loss 0.921943, acc 0.53125
2018-02-17T22:23:54.983379: step 233, loss 0.89515, acc 0.578125
2018-02-17T22:23:55.223015: step 234, loss 0.736431, acc 0.671875
2018-02-17T22:23:55.462652: step 235, loss 0.788206, acc 0.609375
2018-02-17T22:23:55.723347: step 236, loss 0.819368, acc 0.59375
2018-02-17T22:23:55.994067: step 237, loss 0.745105, acc 0.59375
2018-02-17T22:23:56.235708: step 238, loss 0.734398, acc 0.703125
2018-02-17T22:23:56.481362: step 239, loss 0.884329, acc 0.578125
2018-02-17T22:23:56.739047: step 240, loss 0.970229, acc 0.46875
2018-02-17T22:23:56.983697: step 241, loss 0.77223, acc 0.59375
2018-02-17T22:23:57.225340: step 242, loss 0.903473, acc 0.59375
2018-02-17T22:23:57.469991: step 243, loss 0.867376, acc 0.546875
2018-02-17T22:23:57.712636: step 244, loss 0.73152, acc 0.671875
2018-02-17T22:23:57.959292: step 245, loss 0.793619, acc 0.640625
2018-02-17T22:23:58.204946: step 246, loss 0.666538, acc 0.65625
2018-02-17T22:23:58.455613: step 247, loss 0.991384, acc 0.59375
2018-02-17T22:23:58.703271: step 248, loss 1.0508, acc 0.515625
2018-02-17T22:23:58.954941: step 249, loss 0.736867, acc 0.6875
2018-02-17T22:23:59.196583: step 250, loss 0.808228, acc 0.59375
2018-02-17T22:23:59.439229: step 251, loss 0.633869, acc 0.671875
2018-02-17T22:23:59.677864: step 252, loss 0.778929, acc 0.609375
2018-02-17T22:23:59.918504: step 253, loss 0.800023, acc 0.625
2018-02-17T22:24:00.181203: step 254, loss 0.640293, acc 0.703125
2018-02-17T22:24:00.420840: step 255, loss 0.739693, acc 0.640625
2018-02-17T22:24:00.661481: step 256, loss 0.896987, acc 0.65625
2018-02-17T22:24:00.907135: step 257, loss 0.779045, acc 0.625
2018-02-17T22:24:01.154794: step 258, loss 0.815463, acc 0.53125
2018-02-17T22:24:01.398441: step 259, loss 0.712556, acc 0.703125
2018-02-17T22:24:01.643091: step 260, loss 0.729555, acc 0.59375
2018-02-17T22:24:01.891753: step 261, loss 0.635836, acc 0.671875
2018-02-17T22:24:02.131390: step 262, loss 0.896136, acc 0.59375
2018-02-17T22:24:02.372029: step 263, loss 0.851616, acc 0.578125
2018-02-17T22:24:02.615678: step 264, loss 0.720664, acc 0.578125
2018-02-17T22:24:02.871359: step 265, loss 0.684132, acc 0.59375
2018-02-17T22:24:03.115006: step 266, loss 0.757671, acc 0.546875
2018-02-17T22:24:03.371686: step 267, loss 0.826008, acc 0.59375
2018-02-17T22:24:03.637395: step 268, loss 0.739467, acc 0.59375
2018-02-17T22:24:03.878033: step 269, loss 0.856889, acc 0.5625
2018-02-17T22:24:04.122684: step 270, loss 0.845038, acc 0.609375
2018-02-17T22:24:04.368337: step 271, loss 0.754811, acc 0.59375
2018-02-17T22:24:04.602961: step 272, loss 0.687208, acc 0.625
2018-02-17T22:24:04.856636: step 273, loss 0.720359, acc 0.640625
2018-02-17T22:24:05.105297: step 274, loss 0.590739, acc 0.6875
2018-02-17T22:24:05.344936: step 275, loss 0.683003, acc 0.671875
2018-02-17T22:24:05.593596: step 276, loss 0.843017, acc 0.59375
2018-02-17T22:24:05.858300: step 277, loss 1.00144, acc 0.59375
2018-02-17T22:24:06.101951: step 278, loss 0.722595, acc 0.671875
2018-02-17T22:24:06.347601: step 279, loss 0.720182, acc 0.671875
2018-02-17T22:24:06.589244: step 280, loss 0.639732, acc 0.640625
2018-02-17T22:24:06.838908: step 281, loss 0.739676, acc 0.65625
2018-02-17T22:24:07.093585: step 282, loss 0.790985, acc 0.609375
2018-02-17T22:24:07.337234: step 283, loss 0.879859, acc 0.515625
2018-02-17T22:24:07.577873: step 284, loss 0.920295, acc 0.609375
2018-02-17T22:24:07.823527: step 285, loss 0.809854, acc 0.59375
2018-02-17T22:24:08.069181: step 286, loss 0.891706, acc 0.5625
2018-02-17T22:24:08.306813: step 287, loss 0.824966, acc 0.5625
2018-02-17T22:24:08.544445: step 288, loss 0.759136, acc 0.625
2018-02-17T22:24:08.787091: step 289, loss 0.753416, acc 0.578125
2018-02-17T22:24:09.037756: step 290, loss 0.671549, acc 0.703125
2018-02-17T22:24:09.285415: step 291, loss 0.722183, acc 0.65625
2018-02-17T22:24:09.536082: step 292, loss 0.95078, acc 0.546875
2018-02-17T22:24:09.784745: step 293, loss 0.819654, acc 0.609375
2018-02-17T22:24:10.034412: step 294, loss 0.773082, acc 0.625
2018-02-17T22:24:10.276050: step 295, loss 0.674519, acc 0.625
2018-02-17T22:24:10.516691: step 296, loss 1.00588, acc 0.4375
2018-02-17T22:24:10.768360: step 297, loss 0.72542, acc 0.578125
2018-02-17T22:24:11.014014: step 298, loss 0.79415, acc 0.578125
2018-02-17T22:24:11.256658: step 299, loss 0.822355, acc 0.59375
2018-02-17T22:24:11.488274: step 300, loss 0.858961, acc 0.616667

Evaluation:
2018-02-17T22:24:12.302441: step 300, loss 0.634211, acc 0.641651

Saved model checkpoint to C:\Users\95890\eclipse-workspace\cnn-text-classification\runs\1518877370\checkpoints\model-300

2018-02-17T22:24:13.353977: step 301, loss 0.668762, acc 0.671875
2018-02-17T22:24:13.621691: step 302, loss 0.644594, acc 0.65625
2018-02-17T22:24:13.867342: step 303, loss 0.664237, acc 0.65625
2018-02-17T22:24:14.111993: step 304, loss 0.603637, acc 0.75
2018-02-17T22:24:14.356643: step 305, loss 0.93433, acc 0.53125
2018-02-17T22:24:14.603299: step 306, loss 0.719889, acc 0.59375
2018-02-17T22:24:14.845945: step 307, loss 0.732325, acc 0.703125
2018-02-17T22:24:15.093604: step 308, loss 0.681458, acc 0.609375
2018-02-17T22:24:15.337252: step 309, loss 0.589608, acc 0.59375
2018-02-17T22:24:15.580901: step 310, loss 0.651723, acc 0.71875
2018-02-17T22:24:15.836580: step 311, loss 0.604413, acc 0.671875
2018-02-17T22:24:16.098276: step 312, loss 0.62269, acc 0.703125
2018-02-17T22:24:16.337915: step 313, loss 0.611882, acc 0.734375
2018-02-17T22:24:16.584570: step 314, loss 0.633484, acc 0.671875
2018-02-17T22:24:16.835236: step 315, loss 0.602097, acc 0.703125
2018-02-17T22:24:17.078884: step 316, loss 0.665506, acc 0.578125
2018-02-17T22:24:17.324538: step 317, loss 0.674811, acc 0.65625
2018-02-17T22:24:17.574204: step 318, loss 0.660865, acc 0.734375
2018-02-17T22:24:17.819855: step 319, loss 0.518028, acc 0.75
2018-02-17T22:24:18.068517: step 320, loss 0.796152, acc 0.609375
2018-02-17T22:24:18.306149: step 321, loss 0.619835, acc 0.671875
2018-02-17T22:24:18.550799: step 322, loss 0.761612, acc 0.65625
2018-02-17T22:24:18.789434: step 323, loss 0.604972, acc 0.6875
2018-02-17T22:24:19.031077: step 324, loss 0.570458, acc 0.703125
2018-02-17T22:24:19.273722: step 325, loss 0.764736, acc 0.5625
2018-02-17T22:24:19.518372: step 326, loss 0.635958, acc 0.703125
2018-02-17T22:24:19.757009: step 327, loss 0.640578, acc 0.640625
2018-02-17T22:24:20.003663: step 328, loss 0.585828, acc 0.703125
2018-02-17T22:24:20.258342: step 329, loss 0.613925, acc 0.703125
2018-02-17T22:24:20.502991: step 330, loss 0.569395, acc 0.75
2018-02-17T22:24:20.749648: step 331, loss 0.544942, acc 0.75
2018-02-17T22:24:21.004325: step 332, loss 0.774088, acc 0.625
2018-02-17T22:24:21.241958: step 333, loss 0.575258, acc 0.703125
2018-02-17T22:24:21.490618: step 334, loss 0.613751, acc 0.65625
2018-02-17T22:24:21.740283: step 335, loss 0.761731, acc 0.640625
2018-02-17T22:24:21.985936: step 336, loss 0.55553, acc 0.703125
2018-02-17T22:24:22.238608: step 337, loss 0.81347, acc 0.53125
2018-02-17T22:24:22.480253: step 338, loss 0.560819, acc 0.703125
2018-02-17T22:24:22.743944: step 339, loss 0.458766, acc 0.8125
2018-02-17T22:24:23.054770: step 340, loss 0.517656, acc 0.765625
2018-02-17T22:24:23.362590: step 341, loss 0.597648, acc 0.671875
2018-02-17T22:24:23.701491: step 342, loss 0.571574, acc 0.703125
2018-02-17T22:24:24.026355: step 343, loss 0.571678, acc 0.671875
2018-02-17T22:24:24.357236: step 344, loss 0.640245, acc 0.703125
2018-02-17T22:24:24.672072: step 345, loss 0.499579, acc 0.796875
2018-02-17T22:24:24.930761: step 346, loss 0.664821, acc 0.625
2018-02-17T22:24:25.164381: step 347, loss 0.531874, acc 0.734375
2018-02-17T22:24:25.414071: step 348, loss 0.602986, acc 0.703125
2018-02-17T22:24:25.659700: step 349, loss 0.554402, acc 0.671875
2018-02-17T22:24:25.943320: step 350, loss 0.648171, acc 0.703125
2018-02-17T22:24:26.208024: step 351, loss 0.548233, acc 0.625
2018-02-17T22:24:26.453680: step 352, loss 0.597948, acc 0.671875
2018-02-17T22:24:26.707355: step 353, loss 0.633063, acc 0.703125
2018-02-17T22:24:26.958020: step 354, loss 0.658462, acc 0.65625
2018-02-17T22:24:27.207686: step 355, loss 0.543372, acc 0.71875
2018-02-17T22:24:27.462361: step 356, loss 0.580617, acc 0.734375
2018-02-17T22:24:27.714031: step 357, loss 0.901522, acc 0.546875
2018-02-17T22:24:27.967705: step 358, loss 0.585599, acc 0.734375
2018-02-17T22:24:28.218372: step 359, loss 0.572628, acc 0.671875
2018-02-17T22:24:28.469040: step 360, loss 0.591471, acc 0.65625
2018-02-17T22:24:28.718702: step 361, loss 0.613531, acc 0.71875
2018-02-17T22:24:28.973380: step 362, loss 0.576058, acc 0.71875
2018-02-17T22:24:29.222041: step 363, loss 0.682037, acc 0.671875
2018-02-17T22:24:29.477721: step 364, loss 0.665784, acc 0.640625
2018-02-17T22:24:29.733401: step 365, loss 0.680652, acc 0.703125
2018-02-17T22:24:29.987076: step 366, loss 0.594415, acc 0.703125
2018-02-17T22:24:30.238746: step 367, loss 0.583261, acc 0.765625
2018-02-17T22:24:30.497436: step 368, loss 0.563779, acc 0.609375
2018-02-17T22:24:30.746104: step 369, loss 0.708905, acc 0.625
2018-02-17T22:24:30.996762: step 370, loss 0.790087, acc 0.5625
2018-02-17T22:24:31.245425: step 371, loss 0.636762, acc 0.703125
2018-02-17T22:24:31.505114: step 372, loss 0.694305, acc 0.578125
2018-02-17T22:24:31.759791: step 373, loss 0.636606, acc 0.71875
2018-02-17T22:24:32.013468: step 374, loss 0.621469, acc 0.71875
2018-02-17T22:24:32.268144: step 375, loss 0.583082, acc 0.6875
2018-02-17T22:24:32.516807: step 376, loss 0.579761, acc 0.671875
2018-02-17T22:24:32.774500: step 377, loss 0.555592, acc 0.671875
2018-02-17T22:24:33.028166: step 378, loss 0.795011, acc 0.546875
2018-02-17T22:24:33.279835: step 379, loss 0.50606, acc 0.75
2018-02-17T22:24:33.531505: step 380, loss 0.589718, acc 0.71875
2018-02-17T22:24:33.811249: step 381, loss 0.747198, acc 0.671875
2018-02-17T22:24:34.063920: step 382, loss 0.590681, acc 0.71875
2018-02-17T22:24:34.321606: step 383, loss 0.57931, acc 0.71875
2018-02-17T22:24:34.574276: step 384, loss 0.803362, acc 0.578125
2018-02-17T22:24:34.826950: step 385, loss 0.556127, acc 0.703125
2018-02-17T22:24:35.078621: step 386, loss 0.668578, acc 0.640625
2018-02-17T22:24:35.327280: step 387, loss 0.596383, acc 0.6875
2018-02-17T22:24:35.575942: step 388, loss 0.708604, acc 0.65625
2018-02-17T22:24:35.838639: step 389, loss 0.578459, acc 0.640625
2018-02-17T22:24:36.108360: step 390, loss 0.641538, acc 0.65625
2018-02-17T22:24:36.353008: step 391, loss 0.581909, acc 0.65625
2018-02-17T22:24:36.601670: step 392, loss 0.656502, acc 0.6875
2018-02-17T22:24:36.859355: step 393, loss 0.579553, acc 0.734375
2018-02-17T22:24:37.110023: step 394, loss 0.739661, acc 0.640625
2018-02-17T22:24:37.355675: step 395, loss 0.539607, acc 0.75
2018-02-17T22:24:37.623387: step 396, loss 0.695101, acc 0.65625
2018-02-17T22:24:37.871047: step 397, loss 0.551378, acc 0.75
2018-02-17T22:24:38.119707: step 398, loss 0.627957, acc 0.625
2018-02-17T22:24:38.368370: step 399, loss 0.583331, acc 0.75
2018-02-17T22:24:38.616027: step 400, loss 0.67079, acc 0.640625

Evaluation:
2018-02-17T22:24:39.421169: step 400, loss 0.656722, acc 0.591932

Saved model checkpoint to C:\Users\95890\eclipse-workspace\cnn-text-classification\runs\1518877370\checkpoints\model-400

2018-02-17T22:24:40.479067: step 401, loss 0.653703, acc 0.578125
2018-02-17T22:24:40.725723: step 402, loss 0.527113, acc 0.796875
2018-02-17T22:24:40.977399: step 403, loss 0.602188, acc 0.734375
2018-02-17T22:24:41.222043: step 404, loss 0.479569, acc 0.765625
2018-02-17T22:24:41.471708: step 405, loss 0.651476, acc 0.609375
2018-02-17T22:24:41.723376: step 406, loss 0.500806, acc 0.84375
2018-02-17T22:24:41.977052: step 407, loss 0.531158, acc 0.796875
2018-02-17T22:24:42.226716: step 408, loss 0.634903, acc 0.65625
2018-02-17T22:24:42.475376: step 409, loss 0.70123, acc 0.625
2018-02-17T22:24:42.737073: step 410, loss 0.629178, acc 0.671875
2018-02-17T22:24:42.994758: step 411, loss 0.668576, acc 0.59375
2018-02-17T22:24:43.238407: step 412, loss 0.576771, acc 0.703125
2018-02-17T22:24:43.489072: step 413, loss 0.593387, acc 0.6875
2018-02-17T22:24:43.766811: step 414, loss 0.787625, acc 0.703125
2018-02-17T22:24:44.017478: step 415, loss 0.60951, acc 0.6875
2018-02-17T22:24:44.262130: step 416, loss 0.660242, acc 0.671875
2018-02-17T22:24:44.507782: step 417, loss 0.718749, acc 0.625
2018-02-17T22:24:44.752432: step 418, loss 0.446617, acc 0.8125
2018-02-17T22:24:45.002096: step 419, loss 0.791225, acc 0.59375
2018-02-17T22:24:45.250758: step 420, loss 0.557713, acc 0.703125
2018-02-17T22:24:45.496411: step 421, loss 0.652933, acc 0.71875
2018-02-17T22:24:45.745073: step 422, loss 0.747765, acc 0.65625
2018-02-17T22:24:46.024817: step 423, loss 0.687486, acc 0.625
2018-02-17T22:24:46.271474: step 424, loss 0.611475, acc 0.671875
2018-02-17T22:24:46.525148: step 425, loss 0.630742, acc 0.65625
2018-02-17T22:24:46.771803: step 426, loss 0.542023, acc 0.703125
2018-02-17T22:24:47.026481: step 427, loss 0.522554, acc 0.765625
2018-02-17T22:24:47.274140: step 428, loss 0.617971, acc 0.6875
2018-02-17T22:24:47.525811: step 429, loss 0.761342, acc 0.609375
2018-02-17T22:24:47.773469: step 430, loss 0.515104, acc 0.765625
2018-02-17T22:24:48.020125: step 431, loss 0.621135, acc 0.6875
2018-02-17T22:24:48.267783: step 432, loss 0.656797, acc 0.640625
2018-02-17T22:24:48.523463: step 433, loss 0.626402, acc 0.671875
2018-02-17T22:24:48.771121: step 434, loss 0.677876, acc 0.671875
2018-02-17T22:24:49.019784: step 435, loss 0.588754, acc 0.671875
2018-02-17T22:24:49.267441: step 436, loss 0.652495, acc 0.6875
2018-02-17T22:24:49.517106: step 437, loss 0.657041, acc 0.609375
2018-02-17T22:24:49.783049: step 438, loss 0.642775, acc 0.6875
2018-02-17T22:24:50.032713: step 439, loss 0.544278, acc 0.703125
2018-02-17T22:24:50.286390: step 440, loss 0.609127, acc 0.703125
2018-02-17T22:24:50.530037: step 441, loss 0.683116, acc 0.578125
2018-02-17T22:24:50.780704: step 442, loss 0.758316, acc 0.625
2018-02-17T22:24:51.060892: step 443, loss 0.694763, acc 0.609375
2018-02-17T22:24:51.311560: step 444, loss 0.632301, acc 0.609375
2018-02-17T22:24:51.555206: step 445, loss 0.545569, acc 0.671875
2018-02-17T22:24:51.802866: step 446, loss 0.682929, acc 0.609375
2018-02-17T22:24:52.058546: step 447, loss 0.719808, acc 0.625
2018-02-17T22:24:52.303196: step 448, loss 0.668347, acc 0.578125
2018-02-17T22:24:52.554866: step 449, loss 0.65534, acc 0.65625
2018-02-17T22:24:52.800520: step 450, loss 0.616967, acc 0.683333
2018-02-17T22:24:53.059207: step 451, loss 0.699269, acc 0.65625
2018-02-17T22:24:53.319900: step 452, loss 0.618532, acc 0.6875
2018-02-17T22:24:53.575580: step 453, loss 0.576288, acc 0.671875
2018-02-17T22:24:53.834269: step 454, loss 0.495917, acc 0.765625
2018-02-17T22:24:54.083934: step 455, loss 0.614118, acc 0.71875
2018-02-17T22:24:54.340616: step 456, loss 0.568083, acc 0.75
2018-02-17T22:24:54.595293: step 457, loss 0.573078, acc 0.65625
2018-02-17T22:24:54.848967: step 458, loss 0.569558, acc 0.796875
2018-02-17T22:24:55.098631: step 459, loss 0.674649, acc 0.59375
2018-02-17T22:24:55.349299: step 460, loss 0.665609, acc 0.609375
2018-02-17T22:24:55.602974: step 461, loss 0.640674, acc 0.625
2018-02-17T22:24:55.868681: step 462, loss 0.553053, acc 0.734375
2018-02-17T22:24:56.124359: step 463, loss 0.567342, acc 0.6875
2018-02-17T22:24:56.370014: step 464, loss 0.604058, acc 0.625
2018-02-17T22:24:56.617671: step 465, loss 0.564018, acc 0.734375
2018-02-17T22:24:56.871347: step 466, loss 0.631816, acc 0.59375
2018-02-17T22:24:57.129031: step 467, loss 0.472171, acc 0.796875
2018-02-17T22:24:57.376691: step 468, loss 0.685074, acc 0.578125
2018-02-17T22:24:57.627359: step 469, loss 0.55354, acc 0.734375
2018-02-17T22:24:57.894069: step 470, loss 0.574782, acc 0.671875
2018-02-17T22:24:58.143730: step 471, loss 0.568664, acc 0.6875
2018-02-17T22:24:58.426433: step 472, loss 0.592111, acc 0.640625
2018-02-17T22:24:58.677099: step 473, loss 0.550215, acc 0.703125
2018-02-17T22:24:58.926763: step 474, loss 0.644087, acc 0.625
2018-02-17T22:24:59.174424: step 475, loss 0.559168, acc 0.78125
2018-02-17T22:24:59.423084: step 476, loss 0.576948, acc 0.6875
2018-02-17T22:24:59.674753: step 477, loss 0.588681, acc 0.765625
2018-02-17T22:24:59.928427: step 478, loss 0.570484, acc 0.71875
2018-02-17T22:25:00.181100: step 479, loss 0.479898, acc 0.765625
2018-02-17T22:25:00.435777: step 480, loss 0.63002, acc 0.6875
2018-02-17T22:25:00.682433: step 481, loss 0.604169, acc 0.71875
2018-02-17T22:25:00.933099: step 482, loss 0.567558, acc 0.640625
2018-02-17T22:25:01.181762: step 483, loss 0.583267, acc 0.71875
2018-02-17T22:25:01.440449: step 484, loss 0.680577, acc 0.625
2018-02-17T22:25:01.692122: step 485, loss 0.594916, acc 0.71875
2018-02-17T22:25:01.945793: step 486, loss 0.697628, acc 0.6875
2018-02-17T22:25:02.191446: step 487, loss 0.522853, acc 0.78125
2018-02-17T22:25:02.439106: step 488, loss 0.5267, acc 0.78125
2018-02-17T22:25:02.697793: step 489, loss 0.512544, acc 0.71875
2018-02-17T22:25:02.947457: step 490, loss 0.513853, acc 0.75
2018-02-17T22:25:03.193110: step 491, loss 0.48337, acc 0.765625
2018-02-17T22:25:03.441773: step 492, loss 0.528031, acc 0.71875
2018-02-17T22:25:03.717506: step 493, loss 0.548244, acc 0.75
2018-02-17T22:25:03.968174: step 494, loss 0.758521, acc 0.578125
2018-02-17T22:25:04.211820: step 495, loss 0.576537, acc 0.765625
2018-02-17T22:25:04.458476: step 496, loss 0.470812, acc 0.8125
2018-02-17T22:25:04.714157: step 497, loss 0.684608, acc 0.65625
2018-02-17T22:25:04.965827: step 498, loss 0.541854, acc 0.6875
2018-02-17T22:25:05.210476: step 499, loss 0.510172, acc 0.703125
2018-02-17T22:25:05.457134: step 500, loss 0.555611, acc 0.703125

Evaluation:
2018-02-17T22:25:06.299373: step 500, loss 0.612761, acc 0.663227

Saved model checkpoint to C:\Users\95890\eclipse-workspace\cnn-text-classification\runs\1518877370\checkpoints\model-500

2018-02-17T22:25:07.308629: step 501, loss 0.547714, acc 0.765625
2018-02-17T22:25:07.563307: step 502, loss 0.598246, acc 0.6875
2018-02-17T22:25:07.819622: step 503, loss 0.599052, acc 0.65625
2018-02-17T22:25:08.070289: step 504, loss 0.551295, acc 0.703125
2018-02-17T22:25:08.318952: step 505, loss 0.487287, acc 0.75
2018-02-17T22:25:08.570619: step 506, loss 0.552506, acc 0.71875
2018-02-17T22:25:08.824294: step 507, loss 0.588262, acc 0.65625
2018-02-17T22:25:09.067942: step 508, loss 0.48301, acc 0.828125
2018-02-17T22:25:09.319613: step 509, loss 0.58832, acc 0.734375
2018-02-17T22:25:09.570278: step 510, loss 0.601218, acc 0.65625
2018-02-17T22:25:09.817937: step 511, loss 0.472677, acc 0.765625
2018-02-17T22:25:10.067602: step 512, loss 0.638508, acc 0.734375
2018-02-17T22:25:10.316264: step 513, loss 0.619788, acc 0.671875
2018-02-17T22:25:10.559910: step 514, loss 0.617854, acc 0.671875
2018-02-17T22:25:10.812582: step 515, loss 0.589214, acc 0.71875
2018-02-17T22:25:11.060241: step 516, loss 0.593137, acc 0.703125
2018-02-17T22:25:11.319932: step 517, loss 0.545062, acc 0.75
2018-02-17T22:25:11.568593: step 518, loss 0.47419, acc 0.765625
2018-02-17T22:25:11.816252: step 519, loss 0.52622, acc 0.765625
2018-02-17T22:25:12.068924: step 520, loss 0.560513, acc 0.71875
2018-02-17T22:25:12.319591: step 521, loss 0.633835, acc 0.625
2018-02-17T22:25:12.567249: step 522, loss 0.545716, acc 0.703125
2018-02-17T22:25:12.825939: step 523, loss 0.519759, acc 0.75
2018-02-17T22:25:13.082621: step 524, loss 0.483072, acc 0.78125
2018-02-17T22:25:13.330280: step 525, loss 0.565181, acc 0.6875
2018-02-17T22:25:13.589970: step 526, loss 0.462592, acc 0.765625
2018-02-17T22:25:13.844646: step 527, loss 0.608609, acc 0.71875
2018-02-17T22:25:14.093310: step 528, loss 0.564282, acc 0.671875
2018-02-17T22:25:14.338961: step 529, loss 0.561088, acc 0.75
2018-02-17T22:25:14.586620: step 530, loss 0.506878, acc 0.75
2018-02-17T22:25:14.836284: step 531, loss 0.452299, acc 0.796875
2018-02-17T22:25:15.081938: step 532, loss 0.48264, acc 0.78125
2018-02-17T22:25:15.334610: step 533, loss 0.599345, acc 0.703125
2018-02-17T22:25:15.617362: step 534, loss 0.509364, acc 0.78125
2018-02-17T22:25:15.928189: step 535, loss 0.420879, acc 0.78125
2018-02-17T22:25:16.220967: step 536, loss 0.636148, acc 0.6875
2018-02-17T22:25:16.476647: step 537, loss 0.473447, acc 0.796875
2018-02-17T22:25:16.732328: step 538, loss 0.536264, acc 0.65625
2018-02-17T22:25:16.978983: step 539, loss 0.687249, acc 0.609375
2018-02-17T22:25:17.230653: step 540, loss 0.555216, acc 0.78125
2018-02-17T22:25:17.480316: step 541, loss 0.594576, acc 0.671875
2018-02-17T22:25:17.722962: step 542, loss 0.530586, acc 0.765625
2018-02-17T22:25:18.004711: step 543, loss 0.604585, acc 0.671875
2018-02-17T22:25:18.259389: step 544, loss 0.604815, acc 0.703125
2018-02-17T22:25:18.506046: step 545, loss 0.67011, acc 0.625
2018-02-17T22:25:18.751699: step 546, loss 0.677043, acc 0.65625
2018-02-17T22:25:19.007380: step 547, loss 0.503046, acc 0.75
2018-02-17T22:25:19.255037: step 548, loss 0.644684, acc 0.703125
2018-02-17T22:25:19.508712: step 549, loss 0.661804, acc 0.734375
2018-02-17T22:25:19.764392: step 550, loss 0.612215, acc 0.609375
2018-02-17T22:25:20.013054: step 551, loss 0.514639, acc 0.78125
2018-02-17T22:25:20.257704: step 552, loss 0.663648, acc 0.609375
2018-02-17T22:25:20.508373: step 553, loss 0.565956, acc 0.703125
2018-02-17T22:25:20.756600: step 554, loss 0.594708, acc 0.734375
2018-02-17T22:25:21.009272: step 555, loss 0.402201, acc 0.796875
2018-02-17T22:25:21.256931: step 556, loss 0.701473, acc 0.671875
2018-02-17T22:25:21.508602: step 557, loss 0.548869, acc 0.75
2018-02-17T22:25:21.761272: step 558, loss 0.782913, acc 0.59375
2018-02-17T22:25:22.013945: step 559, loss 0.515923, acc 0.796875
2018-02-17T22:25:22.260600: step 560, loss 0.526381, acc 0.765625
2018-02-17T22:25:22.530024: step 561, loss 0.515515, acc 0.734375
2018-02-17T22:25:22.782694: step 562, loss 0.546666, acc 0.6875
2018-02-17T22:25:23.036370: step 563, loss 0.566597, acc 0.65625
2018-02-17T22:25:23.282022: step 564, loss 0.556931, acc 0.6875
2018-02-17T22:25:23.535698: step 565, loss 0.55656, acc 0.71875
2018-02-17T22:25:23.805415: step 566, loss 0.473409, acc 0.796875
2018-02-17T22:25:24.055079: step 567, loss 0.497336, acc 0.765625
2018-02-17T22:25:24.302738: step 568, loss 0.567078, acc 0.703125
2018-02-17T22:25:24.546388: step 569, loss 0.57758, acc 0.71875
2018-02-17T22:25:24.789032: step 570, loss 0.607702, acc 0.671875
2018-02-17T22:25:25.040700: step 571, loss 0.48377, acc 0.765625
2018-02-17T22:25:25.286353: step 572, loss 0.446802, acc 0.75
2018-02-17T22:25:25.533013: step 573, loss 0.583836, acc 0.65625
2018-02-17T22:25:25.860883: step 574, loss 0.545369, acc 0.75
2018-02-17T22:25:26.173714: step 575, loss 0.546338, acc 0.765625
2018-02-17T22:25:26.471506: step 576, loss 0.457515, acc 0.796875
2018-02-17T22:25:26.767294: step 577, loss 0.594007, acc 0.671875
2018-02-17T22:25:27.057063: step 578, loss 0.563933, acc 0.6875
2018-02-17T22:25:27.335805: step 579, loss 0.512036, acc 0.78125
2018-02-17T22:25:27.646632: step 580, loss 0.61137, acc 0.71875
2018-02-17T22:25:27.912338: step 581, loss 0.563114, acc 0.671875
2018-02-17T22:25:28.194088: step 582, loss 0.580811, acc 0.6875
2018-02-17T22:25:28.477843: step 583, loss 0.45989, acc 0.796875
2018-02-17T22:25:28.739539: step 584, loss 0.622126, acc 0.6875
2018-02-17T22:25:29.037331: step 585, loss 0.521067, acc 0.765625
2018-02-17T22:25:29.365203: step 586, loss 0.467932, acc 0.828125
2018-02-17T22:25:29.745213: step 587, loss 0.568833, acc 0.703125
2018-02-17T22:25:30.021949: step 588, loss 0.592322, acc 0.734375
2018-02-17T22:25:30.287657: step 589, loss 0.55995, acc 0.6875
2018-02-17T22:25:30.540328: step 590, loss 0.615904, acc 0.6875
2018-02-17T22:25:30.890259: step 591, loss 0.614801, acc 0.734375
2018-02-17T22:25:31.235178: step 592, loss 0.437756, acc 0.828125
2018-02-17T22:25:31.628222: step 593, loss 0.483466, acc 0.734375
2018-02-17T22:25:31.991188: step 594, loss 0.493693, acc 0.78125
2018-02-17T22:25:32.346132: step 595, loss 0.447643, acc 0.78125
2018-02-17T22:25:32.639914: step 596, loss 0.597336, acc 0.6875
2018-02-17T22:25:32.942719: step 597, loss 0.547579, acc 0.703125
2018-02-17T22:25:33.221459: step 598, loss 0.581602, acc 0.703125
2018-02-17T22:25:33.513237: step 599, loss 0.509432, acc 0.703125
2018-02-17T22:25:33.806015: step 600, loss 0.491237, acc 0.783333

Evaluation:
2018-02-17T22:25:34.686356: step 600, loss 0.648985, acc 0.605066

Saved model checkpoint to C:\Users\95890\eclipse-workspace\cnn-text-classification\runs\1518877370\checkpoints\model-600

2018-02-17T22:25:36.106371: step 601, loss 0.655979, acc 0.65625
2018-02-17T22:25:36.470340: step 602, loss 0.591379, acc 0.65625
2018-02-17T22:25:36.778158: step 603, loss 0.520699, acc 0.765625
2018-02-17T22:25:37.065924: step 604, loss 0.554492, acc 0.703125
2018-02-17T22:25:37.370251: step 605, loss 0.497337, acc 0.796875
2018-02-17T22:25:37.609889: step 606, loss 0.435771, acc 0.8125
2018-02-17T22:25:37.853537: step 607, loss 0.536794, acc 0.78125
2018-02-17T22:25:38.091169: step 608, loss 0.5092, acc 0.75
2018-02-17T22:25:38.331810: step 609, loss 0.458527, acc 0.78125
2018-02-17T22:25:38.570444: step 610, loss 0.445105, acc 0.796875
2018-02-17T22:25:38.807074: step 611, loss 0.491639, acc 0.796875
2018-02-17T22:25:39.052729: step 612, loss 0.510547, acc 0.8125
2018-02-17T22:25:39.293367: step 613, loss 0.529783, acc 0.78125
2018-02-17T22:25:39.538017: step 614, loss 0.566362, acc 0.6875
2018-02-17T22:25:39.782669: step 615, loss 0.608538, acc 0.65625
2018-02-17T22:25:40.025314: step 616, loss 0.529096, acc 0.703125
2018-02-17T22:25:40.278991: step 617, loss 0.581925, acc 0.734375
2018-02-17T22:25:40.518626: step 618, loss 0.418302, acc 0.828125
2018-02-17T22:25:40.764279: step 619, loss 0.462804, acc 0.765625
2018-02-17T22:25:41.004919: step 620, loss 0.648658, acc 0.6875
2018-02-17T22:25:41.254583: step 621, loss 0.469623, acc 0.8125
2018-02-17T22:25:41.505250: step 622, loss 0.460526, acc 0.75
2018-02-17T22:25:41.742882: step 623, loss 0.431695, acc 0.8125
2018-02-17T22:25:41.991544: step 624, loss 0.491703, acc 0.8125
2018-02-17T22:25:42.298359: step 625, loss 0.344727, acc 0.875
2018-02-17T22:25:42.535991: step 626, loss 0.523917, acc 0.75
2018-02-17T22:25:42.790670: step 627, loss 0.4709, acc 0.75
2018-02-17T22:25:43.034317: step 628, loss 0.560175, acc 0.78125
2018-02-17T22:25:43.272952: step 629, loss 0.465415, acc 0.8125
2018-02-17T22:25:43.520610: step 630, loss 0.42566, acc 0.796875
2018-02-17T22:25:43.783309: step 631, loss 0.439573, acc 0.828125
2018-02-17T22:25:44.028962: step 632, loss 0.573904, acc 0.71875
2018-02-17T22:25:44.270605: step 633, loss 0.504172, acc 0.734375
2018-02-17T22:25:44.513251: step 634, loss 0.518956, acc 0.734375
2018-02-17T22:25:44.753891: step 635, loss 0.543299, acc 0.78125
2018-02-17T22:25:45.000546: step 636, loss 0.533023, acc 0.6875
2018-02-17T22:25:45.236173: step 637, loss 0.554011, acc 0.75
2018-02-17T22:25:45.475811: step 638, loss 0.503621, acc 0.765625
2018-02-17T22:25:45.712441: step 639, loss 0.430669, acc 0.796875
2018-02-17T22:25:45.975145: step 640, loss 0.416849, acc 0.78125
2018-02-17T22:25:46.229815: step 641, loss 0.713724, acc 0.640625
2018-02-17T22:25:46.485496: step 642, loss 0.561258, acc 0.78125
2018-02-17T22:25:46.737165: step 643, loss 0.481738, acc 0.796875
2018-02-17T22:25:46.991842: step 644, loss 0.490226, acc 0.734375
2018-02-17T22:25:47.240506: step 645, loss 0.519218, acc 0.734375
2018-02-17T22:25:47.490167: step 646, loss 0.477219, acc 0.78125
2018-02-17T22:25:47.732815: step 647, loss 0.592474, acc 0.75
2018-02-17T22:25:47.980474: step 648, loss 0.571244, acc 0.734375
2018-02-17T22:25:48.229134: step 649, loss 0.482516, acc 0.75
2018-02-17T22:25:48.474787: step 650, loss 0.411057, acc 0.859375
2018-02-17T22:25:48.726457: step 651, loss 0.507974, acc 0.75
2018-02-17T22:25:48.978128: step 652, loss 0.559912, acc 0.78125
2018-02-17T22:25:49.224781: step 653, loss 0.501644, acc 0.78125
2018-02-17T22:25:49.464419: step 654, loss 0.482478, acc 0.71875
2018-02-17T22:25:49.706062: step 655, loss 0.564181, acc 0.6875
2018-02-17T22:25:49.954723: step 656, loss 0.496904, acc 0.78125
2018-02-17T22:25:50.203384: step 657, loss 0.539878, acc 0.6875
2018-02-17T22:25:50.441017: step 658, loss 0.542881, acc 0.71875
2018-02-17T22:25:50.689679: step 659, loss 0.405253, acc 0.765625
2018-02-17T22:25:50.937337: step 660, loss 0.428409, acc 0.765625
2018-02-17T22:25:51.192742: step 661, loss 0.464748, acc 0.71875
2018-02-17T22:25:51.434386: step 662, loss 0.511609, acc 0.734375
2018-02-17T22:25:51.682044: step 663, loss 0.624769, acc 0.703125
2018-02-17T22:25:51.925694: step 664, loss 0.550153, acc 0.75
2018-02-17T22:25:52.180370: step 665, loss 0.537141, acc 0.765625
2018-02-17T22:25:52.441063: step 666, loss 0.481873, acc 0.75
2018-02-17T22:25:52.696743: step 667, loss 0.43699, acc 0.828125
2018-02-17T22:25:52.941394: step 668, loss 0.420006, acc 0.859375
2018-02-17T22:25:53.186044: step 669, loss 0.534589, acc 0.734375
2018-02-17T22:25:53.433703: step 670, loss 0.505673, acc 0.78125
2018-02-17T22:25:53.708434: step 671, loss 0.515514, acc 0.6875
2018-02-17T22:25:53.956094: step 672, loss 0.660347, acc 0.703125
2018-02-17T22:25:54.194734: step 673, loss 0.442458, acc 0.78125
2018-02-17T22:25:54.433362: step 674, loss 0.383785, acc 0.828125
2018-02-17T22:25:54.676008: step 675, loss 0.50234, acc 0.78125
2018-02-17T22:25:54.916647: step 676, loss 0.65407, acc 0.703125
2018-02-17T22:25:55.156287: step 677, loss 0.49667, acc 0.71875
2018-02-17T22:25:55.405948: step 678, loss 0.515638, acc 0.734375
2018-02-17T22:25:55.657619: step 679, loss 0.551974, acc 0.703125
2018-02-17T22:25:55.930344: step 680, loss 0.45876, acc 0.78125
2018-02-17T22:25:56.176000: step 681, loss 0.408383, acc 0.8125
2018-02-17T22:25:56.415636: step 682, loss 0.386885, acc 0.84375
2018-02-17T22:25:56.663293: step 683, loss 0.608487, acc 0.671875
2018-02-17T22:25:56.933011: step 684, loss 0.444663, acc 0.765625
2018-02-17T22:25:57.176659: step 685, loss 0.573507, acc 0.671875
2018-02-17T22:25:57.426322: step 686, loss 0.458146, acc 0.828125
2018-02-17T22:25:57.670975: step 687, loss 0.434775, acc 0.828125
2018-02-17T22:25:57.918632: step 688, loss 0.360829, acc 0.828125
2018-02-17T22:25:58.161277: step 689, loss 0.494623, acc 0.765625
2018-02-17T22:25:58.410942: step 690, loss 0.57628, acc 0.71875
2018-02-17T22:25:58.653586: step 691, loss 0.479851, acc 0.765625
2018-02-17T22:25:58.906259: step 692, loss 0.674853, acc 0.671875
2018-02-17T22:25:59.160937: step 693, loss 0.538532, acc 0.640625
2018-02-17T22:25:59.402580: step 694, loss 0.474783, acc 0.765625
2018-02-17T22:25:59.653247: step 695, loss 0.464553, acc 0.796875
2018-02-17T22:25:59.897896: step 696, loss 0.444181, acc 0.78125
2018-02-17T22:26:00.155582: step 697, loss 0.450783, acc 0.734375
2018-02-17T22:26:00.396222: step 698, loss 0.429038, acc 0.8125
2018-02-17T22:26:00.635859: step 699, loss 0.530946, acc 0.734375
2018-02-17T22:26:00.879508: step 700, loss 0.363814, acc 0.890625

Evaluation:
2018-02-17T22:26:01.699689: step 700, loss 0.593389, acc 0.679174

Saved model checkpoint to C:\Users\95890\eclipse-workspace\cnn-text-classification\runs\1518877370\checkpoints\model-700

2018-02-17T22:26:02.824816: step 701, loss 0.534961, acc 0.734375
2018-02-17T22:26:03.071471: step 702, loss 0.723283, acc 0.625
2018-02-17T22:26:03.322139: step 703, loss 0.550226, acc 0.75
2018-02-17T22:26:03.574810: step 704, loss 0.421853, acc 0.828125
2018-02-17T22:26:03.822469: step 705, loss 0.585873, acc 0.75
2018-02-17T22:26:04.074138: step 706, loss 0.548535, acc 0.78125
2018-02-17T22:26:04.317787: step 707, loss 0.545322, acc 0.734375
2018-02-17T22:26:04.555418: step 708, loss 0.414953, acc 0.8125
2018-02-17T22:26:04.797061: step 709, loss 0.434529, acc 0.828125
