The main difference between static_rnn and dynamic_rnn lies in how they are implemented:
- static_rnn unrolls the RNN across time, trading space for speed; long sequences can overwhelm the GPU (based on my own testing).
- dynamic_rnn instead executes a for/while loop inside the graph.
Calling static_rnn actually generates the graph of the RNN unrolled along the time axis. Open TensorBoard and you will see sequence_length rnn_cells stacked together, except that these cells share weights. As a result, sequence_length is baked into the graph topology, which forces every batch to have the same sequence_length. Calling dynamic_rnn does not unroll the RNN; instead it uses the tf.while_loop API, together with control-flow nodes such as Enter, Switch, Merge, LoopCondition, and NextIteration, to generate a graph that can execute a loop (the graph is still static, since its topology does not change during execution). In TensorBoard you will see only a single rnn_cell, surrounded by a cluster of control-flow nodes. For dynamic_rnn, sequence_length merely determines the number of loop iterations and has nothing to do with the graph topology itself, so each batch can have a different sequence_length.
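To make the tf.while_loop point concrete, here is a minimal sketch of my own (not the dynamic_rnn source): the graph contains a single loop body, and the per-run sequence length only controls how many times it executes.

import tensorflow as tf

seq_len = tf.placeholder(tf.int32, [])            # may differ on every run
x = tf.placeholder(tf.float32, [None, None, 4])   # [batch, time, features]

def cond(t, state):
    return t < seq_len

def body(t, state):
    # one "cell" application; in a real RNN the weights used here
    # would be shared across all iterations
    return t + 1, state + tf.reduce_mean(x[:, t, :], axis=1)

t0 = tf.constant(0)
state0 = tf.zeros([tf.shape(x)[0]])
_, final_state = tf.while_loop(cond, body, [t0, state0])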
static_rnn
Imports, data loading, and variable definitions
import tensorflow as tf
tf.reset_default_graph()  # reset the default graph so repeated runs don't collide on variable names
import datetime  # for timestamped logging
import os        # for building the model save path
from tensorflow.examples.tutorials.mnist import input_data  # MNIST dataset
mnist = input_data.read_data_sets('../', one_hot=True)
batch_size = 100  # train on 100 examples per step
# image dimensions
width = 28
height = 28
# number of hidden units in the LSTM
rnn_size = 256
# length of the one-hot output vector
out_size = 10
Declare variables
def weight_variable(shape, w_alpha=0.01):
    initial = w_alpha * tf.random_normal(shape)
    return tf.Variable(initial)

def bias_variable(shape, b_alpha=0.1):
    initial = b_alpha * tf.random_normal(shape)
    return tf.Variable(initial)

# weights and biases of the output layer
w = weight_variable([rnn_size, out_size])
b = bias_variable([out_size])
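These shapes set up the output projection: the LSTM's final hidden vector is mapped to class scores.

# [batch, rnn_size] x [rnn_size, out_size] + [out_size] -> [batch, out_size]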
Reshape the data into the format static_rnn expects
# placeholder sized to the images
X = tf.placeholder(tf.float32, [None, height, width])
# transpose [0, 1, 2] to [1, 0, 2], i.e. swap the first two dimensions:
# shape (batch, height, width) becomes (height, batch, width).
# In effect this groups together the same row index across all images,
# e.g. row 1 of x1 next to row 1 of x2
x = tf.transpose(X, [1, 0, 2])
# -1 means the dimension is inferred; each image row becomes one row here
x = tf.reshape(x, [-1, width])
# split along dimension 0 (the default) into `height` pieces, i.e. regroup
# all image rows by their row index
x = tf.split(x, height)
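To see what this transpose/reshape/split pipeline does to the shapes, here is a small standalone check (toy sizes of my own choosing: 2 "images" of height 3 and width 4):

import numpy as np
import tensorflow as tf

demo = tf.constant(np.arange(24, dtype=np.float32).reshape(2, 3, 4))
t = tf.transpose(demo, [1, 0, 2])  # shape (3, 2, 4): time-major
t = tf.reshape(t, [-1, 4])         # shape (6, 4)
pieces = tf.split(t, 3)            # a list of 3 tensors, each (2, 4)
print([p.shape for p in pieces])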
Build the static RNN
# LSTM
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(rnn_size)
# static_rnn produces one output per time step; we only need the last one
outputs, status = tf.nn.static_rnn(lstm_cell, x, dtype=tf.float32)
# take the last output and apply the output-layer matmul
y_conv = tf.add(tf.matmul(outputs[-1], w), b)
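Because the graph is fully unrolled, the output shapes are already known at build time; a quick sanity check:

print(len(outputs))       # 28: one output per time step (height)
print(outputs[-1].shape)  # (?, 256): [batch, rnn_size]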
# loss minimization
Y = tf.placeholder(dtype=tf.float32, shape=[None, 10])
# cross-entropy loss
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=y_conv, labels=Y))
optimizer = tf.train.AdamOptimizer(0.01).minimize(loss)
# compute the accuracy
correct = tf.equal(tf.argmax(y_conv, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
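As a worked example of the accuracy computation (values of my own choosing):

# y_conv = [[0.1, 2.0], [3.0, 0.5]] -> argmax -> [1, 0]
# Y      = [[0, 1],     [0, 1]]     -> argmax -> [1, 1]
# correct = [True, False] -> cast -> [1.0, 0.0] -> accuracy = 0.5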
Train the model
# start a session and begin training
saver = tf.train.Saver()
session = tf.Session()
session.run(tf.global_variables_initializer())
step = 0
acc_rate = 0.90
while 1:
    batch_x, batch_y = mnist.train.next_batch(batch_size)
    batch_x = batch_x.reshape((batch_size, height, width))
    session.run(optimizer, feed_dict={X: batch_x, Y: batch_y})
    # evaluate on the test set every 10 training steps
    if step % 10 == 0:
        batch_x_test = mnist.test.images
        batch_y_test = mnist.test.labels
        batch_x_test = batch_x_test.reshape([-1, height, width])
        acc = session.run(accuracy, feed_dict={X: batch_x_test, Y: batch_y_test})
        print(datetime.datetime.now().strftime('%c'), ' step:', step, ' accuracy:', acc)
        # accuracy meets the target: save the model and stop
        if acc >= acc_rate:
            # os.sep = '/'
            model_path = os.getcwd() + os.sep + str(acc_rate) + "mnist.model"
            saver.save(session, model_path, global_step=step)
            break
    step += 1
session.close()
Wed Dec 18 10:08:45 2019 step: 0 accuracy: 0.1006
Wed Dec 18 10:08:46 2019 step: 10 accuracy: 0.1009
Wed Dec 18 10:08:46 2019 step: 20 accuracy: 0.1028
…
Wed Dec 18 10:08:57 2019 step: 190 accuracy: 0.9164
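If you later want to reload the checkpoint saved above, a minimal restore sketch (assuming the same default paths as the training code):

ckpt = tf.train.latest_checkpoint(os.getcwd())
if ckpt:
    restore_session = tf.Session()
    saver.restore(restore_session, ckpt)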
dynamic_rnn
Load the data and declare variables
import tensorflow as tf
tf.reset_default_graph()
from tensorflow.examples.tutorials.mnist import input_data
# load the data
mnist = input_data.read_data_sets("../", one_hot=True)
# input images are 28x28
n_input = 28
max_time = 28
lstm_size = 100   # number of hidden units, tunable
n_class = 10      # 10 classes
batch_size = 100  # 100 examples per batch, tunable
n_batch_size = mnist.train.num_examples // batch_size  # number of batches per epoch
Extracting …/train-images-idx3-ubyte.gz
Extracting …/train-labels-idx1-ubyte.gz
Extracting …/t10k-images-idx3-ubyte.gz
Extracting …/t10k-labels-idx1-ubyte.gz
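A quick sanity check on the batch arithmetic: MNIST's training split has 55,000 examples, so with batch_size = 100 we get n_batch_size = 55000 // 100 = 550, which matches the "0/550" progress lines in the training output below.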
Placeholders and weights
# None means the first dimension can be any length
# create the placeholders
x = tf.placeholder(tf.float32, [None, 28*28])
# the correct labels
y = tf.placeholder(tf.float32, [None, 10])
# initialize the weights; stddev is the standard deviation
weight = tf.Variable(tf.truncated_normal([lstm_size, n_class], stddev=0.1))
# initialize the biases
biases = tf.Variable(tf.constant(0.1, shape=[n_class]))
Build the dynamic RNN, the loss, and the accuracy
# define the RNN network
def RNN(X, weights, biases):
    # raw input is [batch_size, 28*28]; reshape to [batch_size, max_time, n_input]
    input_ = tf.reshape(X, [-1, max_time, n_input])
    # define the basic LSTM cell
    # lstm_cell = tf.contrib.rnn.BasicLSTMCell(lstm_size)
    lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(lstm_size)
    # final_state[0] is the cell state
    # final_state[1] is the hidden state
    outputs, final_state = tf.nn.dynamic_rnn(lstm_cell, input_, dtype=tf.float32)
    display(final_state)  # Jupyter display of the state tensors
    # return logits; the cross-entropy loss below applies the softmax itself
    results = tf.matmul(final_state[1], weights) + biases
    return results
# build the network
prediction = RNN(x, weight, biases)
# loss function (expects logits)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=prediction, labels=y))
# optimize with AdamOptimizer
train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)
# record which predictions are correct
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
# compute the accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
LSTMStateTuple(c=<tf.Tensor 'rnn/while/Exit_3:0' shape=(?, 100) dtype=float32>, h=<tf.Tensor 'rnn/while/Exit_4:0' shape=(?, 100) dtype=float32>)
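As an aside: for a single-layer LSTM run without a sequence_length argument, the final hidden state h equals the last time step of outputs, so inside RNN() the classifier could equivalently be written as the following sketch:

# equivalent to using final_state[1] when every sequence runs the full max_time steps
results = tf.matmul(outputs[:, -1, :], weights) + biases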
Train the model
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(6):
        for batch in range(n_batch_size):
            # fetch the next batch
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})
            if batch % 100 == 0:
                print(str(batch) + "/" + str(n_batch_size))
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print("Iter" + str(epoch) + " ,Testing Accuracy = " + str(acc))
        # accuracy meets the target: save the model and stop
        if acc > 0.9:
            saver.save(sess, './rnn_dynamic')
            break
0/550
100/550
200/550
300/550
400/550
500/550
Iter0 ,Testing Accuracy = 0.5903
…
Iter5 ,Testing Accuracy = 0.9103