完成卷积神经网络的训练
# Run the CNN training session: 20000 mini-batch steps, periodic accuracy
# reporting, final test-set evaluation, then checkpoint save.
with tf.Session() as sess:
    merged_summary_op = tf.summary.merge_all()
    summary_writer = tf.summary.FileWriter('/tmp/mnist_log/1', sess.graph)
    summary_writer.add_graph(sess.graph)
    sess.run(tf.global_variables_initializer())
    for i in range(20000):
        batch = data.train.next_batch(50)  # mini-batch of 50 examples
        if i % 100 == 0:  # report training accuracy every 100 steps
            # Dropout disabled (keep_prob=1.0) when measuring accuracy.
            # NOTE(review): this line feeds `_y` but the note variant later in
            # the file feeds `y_` — confirm which name the placeholder uses.
            train_accuracy = accuracy.eval(feed_dict={x: batch[0], _y: batch[1], keep_prob: 1.0})
            # %g, not %d: accuracy is a float in [0, 1]; %d truncates it to 0 or 1.
            print("step %d, train accuracy=%g" % (i, train_accuracy))
        # One optimizer step with dropout active (keep_prob=0.5).
        sess.run(train_step, feed_dict={x: batch[0], _y: batch[1], keep_prob: 0.5})
    # Final accuracy over the full test set, dropout disabled.
    print(sess.run(accuracy, feed_dict={x: data.test.images, _y: data.test.labels, keep_prob: 1.0}))
    # Save weights only (no meta graph / checkpoint state files).
    # Filename fixed from the typo 'convalution.ckpt' to match the
    # 'convolutional.ckpt' path used by the save call later in this file.
    path = saver.save(
        sess, os.path.join(os.path.dirname(__file__), 'data', 'convolutional.ckpt'),
        write_meta_graph=False, write_state=False
    )
    print("Saved:", path)
1. 在GPU上运行可能会比在CPU上稍微快一些。
# 2. Training loop: convolutional training like this typically runs
#    10000-20000 iterations. (Indentation restored: the loop body lines had
#    lost their indent, leaving the snippet syntactically invalid.)
for i in range(20000):
    batch = data.train.next_batch(50)  # mini-batch size of 50
    if i % 100 == 0:  # print the accuracy every 100 steps
        # Evaluate on the current batch with dropout disabled (keep_prob=1.0).
        train_accuracy = accuracy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
        print('step %d, training accuracy %g' % (i, train_accuracy))
    # One optimizer step with dropout active (keep_prob=0.5).
    sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
# Final accuracy over the full test set, dropout disabled.
print(sess.run(accuracy, feed_dict={x: data.test.images, y_: data.test.labels, keep_prob: 1.0}))
# Save the trained weights (no meta graph / checkpoint state files).
path = saver.save(
    sess, os.path.join(os.path.dirname(__file__), 'data', 'convolutional.ckpt'),
    write_meta_graph=False, write_state=False
)
print('Saved:', path)