用 TensorFlow 简单构建 CNN 网络
def convolutional(x, keep_prob):
    """Build a multi-layer CNN over 28x28 grayscale images (first layer shown).

    Args:
        x: input tensor of flattened images; reshaped to (-1, 28, 28, 1) below.
        keep_prob: dropout keep probability — not used in the visible portion;
            presumably applied in later fully-connected layers (TODO confirm).
    """
    def conv2d(x, W):
        # 2D convolution, stride 1, 'SAME' padding so spatial size is preserved.
        # Fixed: original called tf.nn.conv2d([1, 1, 1, 1], padding='SAME'),
        # omitting the input, the filter, and the strides keyword.
        return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

    def max_pool_2x2(x):
        # 2x2 max-pooling with stride 2 — halves each spatial dimension.
        return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    def weight_variable(shape):
        # Truncated-normal initialization (stddev 0.1) to break symmetry.
        initial = tf.truncated_normal(shape, stddev=0.1)
        return tf.Variable(initial)

    def bias_variable(shape):
        # Small positive constant bias (0.1).
        initial = tf.constant(0.1, shape=shape)
        return tf.Variable(initial)

    # First convolutional layer.
    x_image = tf.reshape(x, [-1, 28, 28, 1])  # NHWC: batch, 28, 28, 1 channel
    W_conv1 = weight_variable([5, 5, 1, 32])  # renamed from typo 'W_conb1' for consistency
    b_conv1 = bias_variable([32])             # one bias per output filter
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)           # 28x28 -> 14x14
1.训练卷积模型和训练线性模型的基本步骤是一样的:第一步定义模型;第二步使用模型;第三步定义训练过程,经过多次迭代后把模型文件(ckpt)保存下来,以供后续使用。
2.定义模型:
# 卷积模型:多层卷积
def convolutional(x, keep_prob):
    """Build a multi-layer CNN over 28x28 grayscale images (first layer shown).

    Args:
        x: input tensor of flattened images; reshaped to (-1, 28, 28, 1) below.
        keep_prob: dropout keep probability — not used in the visible portion;
            presumably applied in later fully-connected layers (TODO confirm).
    """
    def conv2d(x, W):
        # 2D convolution, stride 1, 'SAME' padding so spatial size is preserved.
        # Fixed: original called tf.nn.conv2d([1, 1, 1, 1], padding='SAME'),
        # omitting the input, the filter, and the strides keyword.
        return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

    def max_pool_2x2(x):
        # 2x2 max-pooling layer with stride 2 — halves each spatial dimension.
        return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    def weight_variable(shape):
        # Weight variable with truncated-normal initialization (stddev 0.1).
        initial = tf.truncated_normal(shape, stddev=0.1)
        return tf.Variable(initial)

    def bias_variable(shape):
        # Bias variable initialized to a small positive constant (0.1).
        initial = tf.constant(0.1, shape=shape)
        return tf.Variable(initial)

    # First convolutional layer:
    x_image = tf.reshape(x, [-1, 28, 28, 1])  # reshape flat input to NHWC images
    W_conv1 = weight_variable([5, 5, 1, 32])  # 5x5 kernels, 1 input channel, 32 filters
    b_conv1 = bias_variable([32])             # one bias per output filter
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)  # convolution + ReLU
    h_pool1 = max_pool_2x2(h_conv1)           # pooling: 28x28 -> 14x14
    # The second layer is structured exactly like the first.