RNN: the Bi-LSTM Recurrent Neural Network
Code shared on Baidu Netdisk: http://pan.baidu.com/s/1eRQcCuu
Machine: Windows 7 Ultimate, 64-bit, AMD Athlon(tm) X2 Dual-Core QL-64 @ 2.10 GHz, 2.75 GB RAM
Environment: Anaconda3-4.2.0-Windows-x86_64
import time
# from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import Get_Mnist_Data

start = time.clock()
# mnist = input_data.read_data_sets('/temp/', one_hot=True)
mnist = Get_Mnist_Data.read_data_sets('Get_Mnist_Data', one_hot=True)
end = time.clock()
print('Running time = %s Seconds' % (end - start))

def compute_accuracy(v_x, v_y):
    global pred
    # feed v_x into the network and get the prediction y_pre
    y_pre = sess.run(pred, feed_dict={x: v_x})
    # mark which predictions match the labels
    correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_y, 1))
    # average the correct flags to get the accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    result = sess.run(accuracy, feed_dict={x: v_x, y: v_y})
    return result

def Bi_lstm(X):
    # one forward and one backward LSTM cell; their per-step outputs are
    # concatenated, so each output has 2 * n_hidden_units features
    lstm_f_cell = tf.contrib.rnn.BasicLSTMCell(n_hidden_units, forget_bias=1.0, state_is_tuple=True)
    lstm_b_cell = tf.contrib.rnn.BasicLSTMCell(n_hidden_units, forget_bias=1.0, state_is_tuple=True)
    return tf.contrib.rnn.static_bidirectional_rnn(lstm_f_cell, lstm_b_cell, X, dtype=tf.float32)

def RNN(X, weights, biases):
    # hidden layer for input: project each 28-pixel image row to n_hidden_units
    X = tf.reshape(X, [-1, n_inputs])
    X_in = tf.matmul(X, weights['in']) + biases['in']
    # reshape into the list-of-steps format expected by the static bi-LSTM
    X_in = tf.reshape(X_in, [-1, n_steps, n_hidden_units])
    X_in = tf.transpose(X_in, [1, 0, 2])   # time-major: [n_steps, batch, n_hidden_units]
    X_in = tf.reshape(X_in, [-1, n_hidden_units])
    X_in = tf.split(X_in, n_steps)         # list of n_steps tensors, each [batch, n_hidden_units]
    outputs, _, _ = Bi_lstm(X_in)

    # hidden layer for output: classify from the last step's forward+backward output
    results = tf.matmul(outputs[-1], weights['out']) + biases['out']
    return results

# load mnist data
# mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# parameter initialization
l_r = 0.001            # learning rate
training_iters = 100
batch_size = 128
n_inputs = 28          # each 28-pixel image row is one input vector
n_steps = 28           # 28 rows = 28 time steps
n_hidden_units = 128
n_classes = 10
# define placeholders for input
x = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_classes])
# define weights and biases; 'out' has 2*n_hidden_units rows because the
# bidirectional LSTM concatenates the forward and backward outputs
weights = {
    'in': tf.Variable(tf.random_normal([n_inputs, n_hidden_units])),
    'out': tf.Variable(tf.random_normal([2 * n_hidden_units, n_classes]))
}
biases = {
    'in': tf.Variable(tf.constant(0.1, shape=[n_hidden_units, ])),
    'out': tf.Variable(tf.constant(0.1, shape=[n_classes, ]))
}
pred = RNN(x, weights, biases)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
train_op = tf.train.AdamOptimizer(l_r).minimize(cost)
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# init session
sess = tf.Session()
# init all variables
sess.run(tf.global_variables_initializer())
# start training
# x_image, x_label = mnist.test.next_batch(500)
# x_image = x_image.reshape([500, n_steps, n_inputs])
for i in range(training_iters):
    # fetch a mini-batch and reshape each flat image into 28 rows of 28 pixels
    batch_x, batch_y = mnist.train.next_batch(batch_size)
    batch_x = batch_x.reshape([batch_size, n_steps, n_inputs])
    sess.run(train_op, feed_dict={x: batch_x, y: batch_y})
    if i % 20 == 0:
        print(sess.run(accuracy, feed_dict={x: batch_x, y: batch_y}))
test_data = mnist.test.images.reshape([-1, n_steps, n_inputs])
test_label = mnist.test.labels
# print("Testing Accuracy:", sess.run(accuracy, feed_dict={x: test_data, y: test_label}))
print("Testing Accuracy: ", compute_accuracy(test_data, test_label))
sess.close()
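The reshape pipeline inside RNN() is the trickiest part of the listing, so here is a hypothetical walk-through that replays the same shape changes with plain numpy arrays (the batch size of 4 is arbitrary; the TensorFlow code accepts any batch):

import numpy as np

batch, n_steps, n_inputs, n_hidden = 4, 28, 28, 128
X = np.zeros((batch, n_steps, n_inputs))       # [4, 28, 28]: 4 images, 28 rows of 28 pixels
X = X.reshape(-1, n_inputs)                    # [112, 28]: flatten batch and time together
X_in = X @ np.zeros((n_inputs, n_hidden))      # [112, 128]: project each row, like weights['in']
X_in = X_in.reshape(-1, n_steps, n_hidden)     # [4, 28, 128]: restore batch and time axes
X_in = X_in.transpose(1, 0, 2)                 # [28, 4, 128]: time-major ordering
X_in = X_in.reshape(-1, n_hidden)              # [112, 128]: stack the 28 steps
steps = np.split(X_in, n_steps)                # list of 28 arrays, each [4, 128], one per time step
assert len(steps) == 28 and steps[0].shape == (4, 128)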
Analysis of the principle (see the earlier post RNN之LSTM递归神经网络, on LSTM recurrent neural networks):
Take the three words arrive, Wuhan, on as an example and denote them x1, x2, x3. Approach 1, a forward LSTM: y1 is determined by x1; y2 is determined by a1 and x2, where a1 comes from x1; y3 is determined by a2 and x3, where a2 comes from x1 and x2. For the same three words we can also scan the sentence in the opposite direction. Approach 2, a backward LSTM: y3 is determined by x3; y2 is determined by a3 and x2, where a3 comes from x3; y1 is determined by a2 and x1, where a2 comes from x2 and x3. Both approaches are equally reasonable, so combining approach 1 with approach 2 yields the bidirectional LSTM, i.e. the Bi-LSTM network, as shown in the figure below.
[Figure: the Bi-LSTM neural network]
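To make the forward/backward combination concrete, here is a minimal sketch of a generic bidirectional pass, not taken from the Baidu Netdisk code: step_fw and step_bw are hypothetical stand-ins for the per-step LSTM cell updates, and each output position concatenates a forward state (the past) with a backward state (the future), which is exactly why weights['out'] above has 2*n_hidden_units rows.

import numpy as np

def bidirectional_pass(xs, step_fw, step_bw, h0_fw, h0_bw):
    # forward scan: state a_t summarizes x_1 .. x_t
    h, fw_states = h0_fw, []
    for x_t in xs:
        h = step_fw(x_t, h)
        fw_states.append(h)
    # backward scan: state a'_t summarizes x_t .. x_T
    h, bw_states = h0_bw, []
    for x_t in reversed(xs):
        h = step_bw(x_t, h)
        bw_states.append(h)
    bw_states.reverse()
    # each position sees both past and future context
    return [np.concatenate([f, b]) for f, b in zip(fw_states, bw_states)]

# toy usage: a hypothetical cell update over 3 two-dimensional inputs
xs = [np.ones(2), 2 * np.ones(2), 3 * np.ones(2)]
step = lambda x_t, h: np.tanh(x_t + h)
ys = bidirectional_pass(xs, step, step, np.zeros(2), np.zeros(2))
print([y.shape for y in ys])   # 3 outputs, each of size 2 + 2 = 4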