卷积神经网络结构
书上写的 MNIST 数字识别过于复杂,其实有很简单的版本。
下面的模型是 MNIST 识别的简单版本。
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
# Load the MNIST dataset (downloaded to MNIST_data/ on first run);
# one_hot=True returns labels as one-hot vectors over the 10 digit classes.
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# InteractiveSession installs itself as default, so .eval()/.run() below
# need no explicit session argument.
sess=tf.InteractiveSession()
#構(gòu)建cnn網(wǎng)絡(luò)結(jié)構(gòu)
def conv2d(x, w):
    """2-D convolution of `x` with kernel `w`, stride 1, SAME padding.

    Output spatial size therefore equals the input spatial size.
    """
    unit_stride = [1, 1, 1, 1]
    return tf.nn.conv2d(x, w, strides=unit_stride, padding='SAME')
# Pooling helper: halves both spatial dimensions.
def max_pool_2x2(x):
    """Max-pool over non-overlapping 2x2 windows (stride 2, SAME padding)."""
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')
# Placeholders sized to the samples: flattened 28x28 images in, one-hot
# 10-class labels out. Leading None allows any batch size.
x=tf.placeholder(tf.float32,[None,784])
y_=tf.placeholder(tf.float32,[None,10])
# Reshape flat 784-vectors to NHWC image tensors: (batch, 28, 28, 1 channel).
x_img=tf.reshape(x,[-1,28,28,1])
# First convolution + pooling layer: 3x3 kernels, 1 input channel -> 32 maps.
w_conv1=tf.Variable(tf.truncated_normal([3,3,1,32],stddev=0.1))
b_conv1=tf.Variable(tf.constant(0.1,shape=[32]))
h_conv1=tf.nn.relu(conv2d(x_img,w_conv1)+b_conv1)
h_pool1=max_pool_2x2(h_conv1)  # 28x28 -> 14x14
# Second convolution + pooling layer: 3x3 kernels, 32 -> 50 feature maps.
w_conv2=tf.Variable(tf.truncated_normal([3,3,32,50],stddev=0.1))
b_conv2=tf.Variable(tf.constant(0.1,shape=[50]))
h_conv2=tf.nn.relu(conv2d(h_pool1,w_conv2)+b_conv2)
h_pool2=max_pool_2x2(h_conv2)  # 14x14 -> 7x7
# First fully connected layer: flattened 7*7*50 pooled features -> 1024 units.
w_fc1=tf.Variable(tf.truncated_normal([7*7*50,1024],stddev=0.1))
b_fc1=tf.Variable(tf.constant(0.1,shape=[1024]))
h_pool2_flat=tf.reshape(h_pool2,[-1,7*7*50])
h_fc1=tf.nn.relu(tf.matmul(h_pool2_flat,w_fc1)+b_fc1)
# Dropout regularization; the keep probability is fed at run time
# (0.5 while training, 1 for evaluation — see the feed_dicts below).
keep_prob=tf.placeholder(tf.float32)
h_fc1_drop=tf.nn.dropout(h_fc1,keep_prob)
# Second (output) fully connected layer: 1024 -> 10 class probabilities.
w_fc2 = tf.Variable(tf.truncated_normal([1024, 10], stddev=0.1))
b_fc2 = tf.Variable(tf.constant(0.1, shape=[10]))
y_out = tf.nn.softmax(tf.matmul(h_fc1_drop, w_fc2) + b_fc2)
# Cross-entropy loss, averaged over the batch.
# Clip the softmax output away from exactly 0 before taking the log:
# tf.log(0) yields -inf and turns the loss/gradients into NaN once any
# probability underflows. The upper bound 1.0 leaves valid values unchanged.
loss = tf.reduce_mean(
    -tf.reduce_sum(y_ * tf.log(tf.clip_by_value(y_out, 1e-10, 1.0)),
                   reduction_indices=[1]))
# Adam optimizer with learning rate 1e-4.
train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)
# Accuracy: fraction of samples whose argmax prediction matches the
# one-hot label's argmax.
correct_prediction=tf.equal(tf.argmax(y_out,1),tf.argmax(y_,1))
accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
# Train: feed mini-batches of 50 for 20000 steps, logging training
# accuracy every 100 steps.
# Fix: the original used Python 2 `print` statements, which are a
# SyntaxError under Python 3; single-argument print(...) calls emit the
# identical output on both Python 2 and 3.
tf.global_variables_initializer().run()
for i in range(20000):
    batch = mnist.train.next_batch(50)
    if i % 100 == 0:
        # Evaluate with dropout disabled (keep_prob=1).
        train_accuracy = accuracy.eval(
            feed_dict={x: batch[0], y_: batch[1], keep_prob: 1})
        print("step %d,train_accuracy= %g" % (i, train_accuracy))
    train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
# After training, report final accuracy on the held-out test set.
print("test_accuracy= %g" % accuracy.eval(
    feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1}))
总结
- 第一步生成训练数据和标签的生成器,便于一次次地 next 获取数据。
- 卷积网络的编写,需要注意各层输入和输出的参数。
- 损失函数和优化代码的编写。
下面的总结具体围绕上面三个要点展开。