
TensorFlow and Deep Learning Programming: A Beginner's Guide

Coder_zheng

Demo 1: A TensorFlow-based deep neural network (DNN)

from tensorflow.examples.tutorials.mnist import input_data  
import tensorflow as tf  
from sklearn.metrics import accuracy_score 
import numpy as np  

if __name__ == '__main__':
	n_inputs = 28 * 28 
	n_hidden1 = 300
	n_hidden2 = 100
	n_outputs = 10

	mnist = input_data.read_data_sets("/tmp/data/") 

	X_train = mnist.train.images 
	X_test  = mnist.test.images
	y_train = mnist.train.labels.astype("int") 
	y_test  = mnist.test.labels.astype("int")

	X = tf.placeholder(tf.float32,shape = (None, n_inputs),name = 'X')
	y = tf.placeholder(tf.int64, shape = (None),name = 'y')
    
	# name scope: dnn
	with tf.name_scope('dnn'):
		hidden1 = tf.layers.dense(X,n_hidden1,activation = tf.nn.relu, name = "hidden1")

		hidden2 = tf.layers.dense(hidden1,n_hidden2,activation = tf.nn.relu,name = "hidden2")

		logits  = tf.layers.dense(hidden2,n_outputs,name = "outputs")
    
	# name scope: loss
	with tf.name_scope('loss'):
		x_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels = y,logits = logits)

		# average the cross-entropy over the batch
		loss = tf.reduce_mean(x_entropy,name = "loss")

 
	learning_rate = 0.01

	# name scope: train
	with tf.name_scope('train'):
		optimizer = tf.train.GradientDescentOptimizer(learning_rate)
		training_op = optimizer.minimize(loss)

	# name scope: eval
	with tf.name_scope('eval'):
		# True where the top prediction matches the label (one boolean per example)
		correct = tf.nn.in_top_k(logits,y,1)
		accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

	init = tf.global_variables_initializer()


	n_epochs = 20
	batch_size = 50

	with tf.Session() as sess:
		init.run()
		for epoch in range(n_epochs):
			for iteration in range(mnist.train.num_examples // batch_size):
				X_batch, y_batch = mnist.train.next_batch(batch_size)
				sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
			# evaluate once per epoch rather than after every mini-batch
			acc_train = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
			acc_test = accuracy.eval(feed_dict={X: mnist.test.images, y: mnist.test.labels})
			print(epoch, "Train accuracy:", acc_train, "Test accuracy:", acc_test)
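
The script imports numpy and sklearn's accuracy_score but never uses them. Inside the same Session block, after training, they could be used to cross-check the final test accuracy (a minimal sketch assuming the graph above):

		# Sketch: run the trained graph on the test set and verify accuracy with sklearn.
		Z = logits.eval(feed_dict={X: mnist.test.images})  # raw class scores, shape (10000, 10)
		y_pred = np.argmax(Z, axis=1)                      # highest-scoring class per image
		print("sklearn accuracy:", accuracy_score(mnist.test.labels, y_pred))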


The official friendly reminder (from the deprecation warning printed by read_data_sets): the way of obtaining MNIST will change in a future release.
MNIST is fun and easy while it lasts, but things get messy fast once you try to run deep learning models on real-world structured data.
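
As that notice suggests, a minimal sketch of the newer loading path uses tf.keras.datasets (bundled with TensorFlow) instead of the deprecated tutorials module; the flattened, rescaled arrays can then be fed to the same placeholders:

# Sketch: load MNIST via tf.keras instead of tensorflow.examples.tutorials.mnist.
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.mnist.load_data()
X_train = X_train.reshape(-1, 28 * 28).astype("float32") / 255.0  # flatten to 784, scale to [0, 1]
X_test = X_test.reshape(-1, 28 * 28).astype("float32") / 255.0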

Problems with Demo 1:
1. The network is shallow: only two hidden layers.
2. Nothing is done about vanishing or exploding gradients.

Demo 2:
Improving the DNN with batch normalization to mitigate vanishing and exploding gradients.

"TensorFlow provides a low-level batch_normalization() function that simply centers and normalizes the inputs, but you must compute the mean and standard deviation yourself (over the mini-batch during training, or over the full dataset during testing) and pass them in as arguments, and you must also handle the creation of the scale and offset parameters (and pass those to the function as well). This is doable, but not the most convenient approach. Instead, you should use the higher-level function (tf.layers.batch_normalization in the code below), which handles all of this for you and can be called directly. To run batch normalization just before each hidden layer's activation function, we apply the activation (ELU here) manually after each batch-norm layer."

import tensorflow as tf

n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 100
n_outputs = 10

X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")

training = tf.placeholder_with_default(False, shape=(), name='training')

hidden1 = tf.layers.dense(X, n_hidden1, name="hidden1")
bn1 = tf.layers.batch_normalization(hidden1, training=training, momentum=0.9)
bn1_act = tf.nn.elu(bn1)

hidden2 = tf.layers.dense(bn1_act, n_hidden2, name="hidden2")
bn2 = tf.layers.batch_normalization(hidden2, training=training, momentum=0.9)
bn2_act = tf.nn.elu(bn2)

logits_before_bn = tf.layers.dense(bn2_act, n_outputs, name="outputs")
logits = tf.layers.batch_normalization(logits_before_bn, training=training,
                                       momentum=0.9)

To avoid repeating the same arguments over and over, we can use Python's functools.partial():

from functools import partial

my_batch_norm_layer = partial(tf.layers.batch_normalization, training=training, momentum=0.9)

hidden1 = tf.layers.dense(X, n_hidden1, name="hidden1")
bn1 = my_batch_norm_layer(hidden1)
bn1_act = tf.nn.elu(bn1)
hidden2 = tf.layers.dense(bn1_act, n_hidden2, name="hidden2")
bn2 = my_batch_norm_layer(hidden2)
bn2_act = tf.nn.elu(bn2)
logits_before_bn = tf.layers.dense(bn2_act, n_outputs, name="outputs")
logits = my_batch_norm_layer(logits_before_bn)

The full code:

from functools import partial 
from tensorflow.examples.tutorials.mnist import input_data  
import tensorflow as tf  

if __name__ == '__main__':
	n_inputs = 28 * 28  
	n_hidden1 = 300
	n_hidden2 = 100
	n_outputs = 10 


	mnist = input_data.read_data_sets("/tmp/data") 

	batch_norm_momentum = 0.9 
	learning_rate = 0.01 

	X = tf.placeholder(tf.float32,shape = (None,n_inputs),name = "X")
	y = tf.placeholder(tf.int64,shape = None,name = 'y')
	# extra placeholder that tells batch norm whether we are training
	training = tf.placeholder_with_default(False,shape = (),name = 'training')


	with tf.name_scope("dnn"):
		# He initialization for the weights
		he_init = tf.contrib.layers.variance_scaling_initializer()

		my_batch_norm_layer = partial(
				tf.layers.batch_normalization,
				training = training,
				momentum = batch_norm_momentum
			)

		my_dense_layer = partial(
				tf.layers.dense,
				kernel_initializer = he_init
			)

		hidden1 = my_dense_layer(X,n_hidden1,name = "hidden1")
		bn1     = tf.nn.elu(my_batch_norm_layer(hidden1))
		hidden2 = my_dense_layer(bn1,n_hidden2,name = "hidden2")
		bn2     = tf.nn.elu(my_batch_norm_layer(hidden2))
		logits_before_bn = my_dense_layer(bn2, n_outputs, name="outputs")
		logits = my_batch_norm_layer(logits_before_bn)

	with tf.name_scope('loss'):
		x_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
		loss = tf.reduce_mean(x_entropy, name = 'loss')

	with tf.name_scope("train"):
		optimizer = tf.train.GradientDescentOptimizer(learning_rate)
		training_op = optimizer.minimize(loss)

	with tf.name_scope("eval"):
		correct = tf.nn.in_top_k(logits, y, 1)
		accuracy = tf.reduce_mean(tf.cast(correct,tf.float32))

	init = tf.global_variables_initializer()
	saver = tf.train.Saver()

	n_epochs = 20
	batch_size = 200
# Note: because we use tf.layers.batch_normalization() rather than tf.contrib.layers.batch_norm()
# (as in the book), we must explicitly run the extra update operations that batch normalization
# needs (sess.run([training_op, extra_update_ops], ...)).

	extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

	with tf.Session() as sess:
		init.run()
		for epoch in range(n_epochs):
			for iteration in range(mnist.train.num_examples // batch_size):
				X_batch,y_batch = mnist.train.next_batch(batch_size)
				sess.run([training_op, extra_update_ops], feed_dict={training: True, X: X_batch, y: y_batch})
			# evaluate on the full test set once per epoch
			accuracy_val = accuracy.eval(feed_dict={X: mnist.test.images, y: mnist.test.labels})
			print(epoch, 'Test accuracy:', accuracy_val)
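
A slightly tidier, equivalent pattern is to attach the update ops to training_op when building the graph, so that a plain sess.run(training_op, ...) also refreshes the batch-norm moving averages. A sketch of what would replace the "train" block above:

	with tf.name_scope("train"):
		optimizer = tf.train.GradientDescentOptimizer(learning_rate)
		# fold the batch-norm update ops into training_op itself
		update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
		with tf.control_dependencies(update_ops):
			training_op = optimizer.minimize(loss)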


Demo 3: Implementing a CNN convolutional layer

from sklearn.datasets import load_sample_image 
import matplotlib.pyplot as plt  
import numpy as np 
import tensorflow as tf  

if __name__ == '__main__':
	#Load sample images 
	china = load_sample_image("china.jpg")
	flower = load_sample_image("flower.jpg")

	datasets = np.array([china,flower],dtype = np.float32)
	batch_size,height,width,channels = datasets.shape 

	#Create 2 filters 
	filters = np.zeros(shape=(7, 7, channels, 2), dtype=np.float32)
	filters[:, 3, :, 0] = 2  # vertical line
	filters[3, :, :, 1] = 2  # horizontal line
	# Create a graph with input X plus a convolutional layer applying the 2 filters
	X = tf.placeholder(tf.float32, shape = (None,height,width,channels))
	convolution = tf.nn.conv2d(X,filters,strides = [1,2,2,1],padding = "SAME")

	with tf.Session() as sess:
		output = sess.run(convolution,feed_dict = {X:datasets})

	plt.imshow(output[1, :, :, 1], cmap="gray") # plot the 2nd image's 2nd feature map
	#plt.imshow(output[0, :, :, 1], cmap="gray") # ...or the 1st image's 2nd feature map
	plt.show()
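
In a real network you would rarely hand-craft the filters: a trainable convolutional layer creates and learns its own kernels. A minimal sketch using tf.layers.conv2d, the higher-level counterpart of tf.nn.conv2d:

	# Sketch: a trainable conv layer; the 7x7 kernels are learned rather than hand-set.
	conv = tf.layers.conv2d(X, filters=2, kernel_size=7, strides=[2, 2],
	                        padding="SAME", activation=tf.nn.relu)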

[Figures: the two sample images and their vertical-line and horizontal-line feature maps]

Demo 4: Implementing a CNN pooling layer

import numpy as np
from sklearn.datasets import load_sample_image
import tensorflow as tf
import matplotlib.pyplot as plt

china = load_sample_image("china.jpg")
flower = load_sample_image("flower.jpg")

dataset = np.array([china, flower], dtype=np.float32)
batch_size, height, width, channels = dataset.shape

# Create 2 filters (kept from Demo 3 for reference; the pooling op below does not use them)
filters = np.zeros(shape=(7, 7, channels, 2), dtype=np.float32)
filters[:, 3, :, 0] = 1  # vertical line
filters[3, :, :, 1] = 1  # horizontal line

X = tf.placeholder(tf.float32, shape=(None, height, width, channels))
max_pool = tf.nn.max_pool(X, ksize=[1,3,3,1], strides=[1,3,3,1],padding="VALID")

with tf.Session() as sess:
    output = sess.run(max_pool, feed_dict={X: dataset})

plt.imshow(output[0].astype(np.uint8))  # plot the output for the 1st image
#plt.imshow(output[1].astype(np.uint8)) 
plt.show()
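
Average pooling works the same way; a sketch where the only change is swapping tf.nn.max_pool for tf.nn.avg_pool, which averages each 3x3 window instead of taking its maximum:

# Sketch: average pooling over the same 3x3 windows with stride 3.
avg_pool = tf.nn.avg_pool(X, ksize=[1, 3, 3, 1], strides=[1, 3, 3, 1], padding="VALID")

with tf.Session() as sess:
    avg_output = sess.run(avg_pool, feed_dict={X: dataset})

plt.imshow(avg_output[0].astype(np.uint8))  # noticeably smoother than the max-pooled image
plt.show()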

[Figures: the max-pooled output images]

Demo 5: Implementing an RNN
The most basic RNN, unrolled over two time steps, is equivalent to two feedforward neural networks that share the same weights:

import numpy as np  
import tensorflow as tf  

if __name__ == '__main__':
	n_inputs = 3 
	n_neurons = 5 
	X0 = tf.placeholder(tf.float32,[None,n_inputs])
	X1 = tf.placeholder(tf.float32,[None,n_inputs])
	Wx = tf.Variable(tf.random_normal(shape = [n_inputs,n_neurons],dtype = tf.float32))
	Wy = tf.Variable(tf.random_normal(shape = [n_neurons,n_neurons],dtype = tf.float32))
	b  = tf.Variable(tf.zeros([1,n_neurons],dtype = tf.float32))
	Y0 = tf.tanh(tf.matmul(X0,Wx) + b)
	Y1 = tf.tanh(tf.matmul(Y0,Wy) + tf.matmul(X1,Wx) + b)
	init = tf.global_variables_initializer() 

	#Mini-batch: instance 0,instance 1,instance 2,instance 3 
	X0_batch = np.array([[0,1,2],[3,4,5],[6,7,8],[9,0,1]])  #t = 0 
	X1_batch = np.array([[9,8,7],[0,0,0],[6,5,4],[3,2,1]])  #t = 1
	with tf.Session() as sess:
		init.run()
		Y0_val,Y1_val = sess.run([Y0,Y1],feed_dict = {X0:X0_batch,X1:X1_batch})

	print(Y0_val)
	print('\n')
	print(Y1_val)
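
The same two-step network can be built with TensorFlow's RNN helpers instead of manual matmuls. A sketch using tf.nn.rnn_cell.BasicRNNCell with tf.nn.static_rnn, which create and share the weight matrices for you (reusing the X0 and X1 placeholders above):

	# Sketch: the same unrolled RNN via BasicRNNCell / static_rnn.
	basic_cell = tf.nn.rnn_cell.BasicRNNCell(num_units=n_neurons)
	output_seqs, states = tf.nn.static_rnn(basic_cell, [X0, X1], dtype=tf.float32)
	Y0, Y1 = output_seqs  # one output tensor per time step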

