
In the previous post we implemented the input pipeline for the network; this post continues with training, using the VGG16 architecture. For convenience, the same keep_prob value is used for both training and testing; in practice you should feed a different value for each phase (see the sketch below). The network's input comes from TFRecord.createBatch(), the batch-producing method from the previous post.
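One way to feed a different dropout value per phase (not used in the code below, which keeps a single Python constant) is to make keep_prob a placeholder. This is only a minimal sketch; the placeholder name is my own:

import tensorflow as tf

# Hypothetical placeholder so keep_prob can be fed per run
keep_prob = tf.placeholder(tf.float32, name="keep_prob")

# later, inside the session:
#   sess.run(optimizer, feed_dict={keep_prob: 0.5})  # training: apply dropout
#   sess.run(accuracy,  feed_dict={keep_prob: 1.0})  # testing: disable dropout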

1. Define the network hyperparameters
import tensorflow as tf
import numpy as np
import TFRecord
# Network hyperparameters
learning_rate = 0.001
display_step = 5
epochs = 10
keep_prob = 0.5

2. Define the different types of layers
# Convolution layer: create kernel and bias, apply conv2d and ReLU
def conv_op(input_op, name, kh, kw, n_out, dh, dw):
    input_op = tf.convert_to_tensor(input_op)
    n_in = input_op.get_shape()[-1].value
    with tf.name_scope(name) as scope:
        kernel = tf.get_variable(scope + "w",
                                 shape=[kh, kw, n_in, n_out],
                                 dtype=tf.float32,
                                 initializer=tf.contrib.layers.xavier_initializer_conv2d())
        conv = tf.nn.conv2d(input_op, kernel, (1, dh, dw, 1), padding='SAME')
        bias_init_val = tf.constant(0.0, shape=[n_out], dtype=tf.float32)
        biases = tf.Variable(bias_init_val, trainable=True, name='b')
        z = tf.nn.bias_add(conv, biases)
        activation = tf.nn.relu(z, name=scope)
        return activation

# Fully connected layer
def fc_op(input_op, name, n_out):
    n_in = input_op.get_shape()[-1].value
    with tf.name_scope(name) as scope:
        kernel = tf.get_variable(scope + "w",
                                 shape=[n_in, n_out],
                                 dtype=tf.float32,
                                 initializer=tf.contrib.layers.xavier_initializer())
        biases = tf.Variable(tf.constant(0.1, shape=[n_out], dtype=tf.float32), name='b')
        # tf.nn.relu_layer multiplies input_op by kernel, adds biases,
        # and applies the ReLU non-linearity to produce the activation
        activation = tf.nn.relu_layer(input_op, kernel, biases, name=scope)
        return activation

# Max-pooling layer
def mpool_op(input_op, name, kh, kw, dh, dw):
    return tf.nn.max_pool(input_op,
                          ksize=[1, kh, kw, 1],
                          strides=[1, dh, dw, 1],
                          padding='SAME',
                          name=name)
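As a quick sanity check of the helpers (my own sketch, not part of the original post; the names "demo_conv" and "demo_pool" are illustrative only), a dummy 224x224x3 batch should produce the expected VGG block-1 shapes:

dummy = tf.zeros([1, 224, 224, 3], dtype=tf.float32)
c = conv_op(dummy, name="demo_conv", kh=3, kw=3, n_out=64, dh=1, dw=1)
p = mpool_op(c, name="demo_pool", kh=2, kw=2, dh=2, dw=2)
print(c.get_shape())   # expected (1, 224, 224, 64)
print(p.get_shape())   # expected (1, 112, 112, 64)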

3. Define the network architecture
def inference_op(input_op, keep_prob):
    # block 1 -- outputs 112x112x64
    conv1_1 = conv_op(input_op, name="conv1_1", kh=3, kw=3, n_out=64, dh=1, dw=1)
    conv1_2 = conv_op(conv1_1,  name="conv1_2", kh=3, kw=3, n_out=64, dh=1, dw=1)
    pool1 = mpool_op(conv1_2,   name="pool1",   kh=2, kw=2, dh=2, dw=2)
    # block 2 -- outputs 56x56x128
    conv2_1 = conv_op(pool1,    name="conv2_1", kh=3, kw=3, n_out=128, dh=1, dw=1)
    conv2_2 = conv_op(conv2_1,  name="conv2_2", kh=3, kw=3, n_out=128, dh=1, dw=1)
    pool2 = mpool_op(conv2_2,   name="pool2",   kh=2, kw=2, dh=2, dw=2)
    # block 3 -- outputs 28x28x256
    conv3_1 = conv_op(pool2,    name="conv3_1", kh=3, kw=3, n_out=256, dh=1, dw=1)
    conv3_2 = conv_op(conv3_1,  name="conv3_2", kh=3, kw=3, n_out=256, dh=1, dw=1)
    conv3_3 = conv_op(conv3_2,  name="conv3_3", kh=3, kw=3, n_out=256, dh=1, dw=1)
    pool3 = mpool_op(conv3_3,   name="pool3",   kh=2, kw=2, dh=2, dw=2)
    # block 4 -- outputs 14x14x512
    conv4_1 = conv_op(pool3,    name="conv4_1", kh=3, kw=3, n_out=512, dh=1, dw=1)
    conv4_2 = conv_op(conv4_1,  name="conv4_2", kh=3, kw=3, n_out=512, dh=1, dw=1)
    conv4_3 = conv_op(conv4_2,  name="conv4_3", kh=3, kw=3, n_out=512, dh=1, dw=1)
    pool4 = mpool_op(conv4_3,   name="pool4",   kh=2, kw=2, dh=2, dw=2)
    # block 5 -- outputs 7x7x512
    conv5_1 = conv_op(pool4,    name="conv5_1", kh=3, kw=3, n_out=512, dh=1, dw=1)
    conv5_2 = conv_op(conv5_1,  name="conv5_2", kh=3, kw=3, n_out=512, dh=1, dw=1)
    conv5_3 = conv_op(conv5_2,  name="conv5_3", kh=3, kw=3, n_out=512, dh=1, dw=1)
    pool5 = mpool_op(conv5_3,   name="pool5",   kh=2, kw=2, dh=2, dw=2)
    # flatten
    shp = pool5.get_shape()
    flattened_shape = shp[1].value * shp[2].value * shp[3].value
    resh1 = tf.reshape(pool5, [-1, flattened_shape], name="resh1")
    # fully connected layers with dropout; fc8 outputs the 2-class logits
    fc6 = fc_op(resh1, name="fc6", n_out=4096)
    fc6_drop = tf.nn.dropout(fc6, keep_prob, name="fc6_drop")
    fc7 = fc_op(fc6_drop, name="fc7", n_out=4096)
    fc7_drop = tf.nn.dropout(fc7, keep_prob, name="fc7_drop")
    logits = fc_op(fc7_drop, name="fc8", n_out=2)
    return logits
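As a quick check of the flatten step (assuming a standard 224x224 VGG16 input, which the original post does not state explicitly), each of the five 2x2 max-pools halves the spatial size:

# 224 -> 112 -> 56 -> 28 -> 14 -> 7 after the five pooling layers
flattened_shape = 7 * 7 * 512   # = 25088, the input width of fc6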
4. Start training
def train(logits, labels):
    # softmax cross-entropy loss averaged over the batch
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
    # accuracy: fraction of samples whose argmax prediction matches the label
    correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    return optimizer, cost, accuracy
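Note that train() assumes the labels produced by TFRecord.createBatch() are already one-hot encoded (both the cross-entropy and the tf.argmax(labels, 1) comparison rely on that). If the pipeline instead yielded integer class indices, a conversion along these lines would be needed before calling train(); this is a hypothetical sketch, not part of the original post:

# Hypothetical: only needed if label_batch holds integer class indices (0 or 1)
label_batch_onehot = tf.one_hot(label_batch, depth=2, dtype=tf.float32)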

5. Main function
if __name__ == "__main__":
    train_filename = "/home/wc/DataSet/traffic/testTFRecord/train.tfrecords"
    test_filename = "/home/wc/DataSet/traffic/testTFRecord/test.tfrecords"
    image_batch, label_batch = TFRecord.createBatch(filename=train_filename, batchsize=2)
    test_image, test_label = TFRecord.createBatch(filename=test_filename, batchsize=20)
    pred = inference_op(input_op=image_batch, keep_prob=keep_prob)
    test_pred = inference_op(input_op=test_image, keep_prob=keep_prob)
    optimizer, cost, accuracy = train(logits=pred, labels=label_batch)
    test_optimizer, test_cost, test_acc = train(logits=test_pred, labels=test_label)
    initop = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    with tf.Session() as sess:
        sess.run(initop)
        # start the queue runners that feed the TFRecord input pipeline
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        step = 0
        while step < epochs:
            step += 1
            print step
            _, loss, acc = sess.run([optimizer, cost, accuracy])
            if step % display_step == 0:
                print loss, acc
        print "training finish!"
        _, testLoss, testAcc = sess.run([test_optimizer, test_cost, test_acc])
        print "Test acc = " + str(testAcc)
        print "Test Finish!"
        # stop the input threads cleanly
        coord.request_stop()
        coord.join(threads)

The hyperparameters here were chosen arbitrarily; the goal is simply to assemble the network, feed the data into it, and get training running. The epochs value is computed as total number of samples (20) / batch_size (2), because the TFRecord-reading method from the previous post creates a file queue:

filename_queue = tf.train.string_input_producer([filename], shuffle=False,num_epochs = 1)

With num_epochs = 1 here, the data can only be read for a single pass; if num_epochs is not set, the records can be read repeatedly.
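As a small worked example using the numbers from this post (20 training samples, batchsize 2, num_epochs = 1 in the file queue), the number of training steps available before the queue runs dry is:

# With num_epochs = 1 the queue yields each of the 20 samples exactly once,
# so only total_samples / batch_size batches can be dequeued
total_samples = 20
batch_size = 2
epochs = total_samples // batch_size   # = 10 training steps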