TensorFlow 的圖操作比較重要
通過圖操作可以對計算圖有更進一步的了解
上一個簡單的訓(xùn)練的代碼
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the MNIST data set (one-hot encoded labels).
mnist = input_data.read_data_sets(r'E:\python\mnist_data', one_hot=True)

# 100 images per batch.
batch_size = 100
# Number of batches per epoch.
n_batch = mnist.train.num_examples // batch_size

# Placeholders for the flattened 28x28 images and the one-hot labels.
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

# A minimal network: 784 input neurons -> 10 output neurons.
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
# Keep the raw logits separate: softmax_cross_entropy_with_logits expects
# UN-normalized logits, not softmax probabilities.
logits = tf.matmul(x, W) + b
prediction = tf.nn.softmax(logits)

# Quadratic cost (kept for reference):
# loss = tf.reduce_mean(tf.square(y - prediction))
# Cross-entropy on the raw logits. The original code passed `prediction`
# (already softmaxed) as `logits`, which applies softmax twice and
# degrades training.
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
# Plain gradient descent.
train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

# Variable initializer op.
init = tf.global_variables_initializer()

# Boolean vector: predicted class == true class.
# argmax returns the position of the largest value along axis 1.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
# Accuracy = mean of the boolean vector cast to float.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(11):
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})
        acc = sess.run(accuracy,
                       feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print("Iter " + str(epoch) + ",Testing Accuracy " + str(acc))
    # Save the trained model (variables + meta graph).
    saver.save(sess, 'net/my_net.ckpt')
結(jié)果是:
0.8241
再把圖restore
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file

# Dump every tensor stored in the checkpoint (names, shapes and values).
print_tensors_in_checkpoint_file('net/my_net.ckpt', None, True, True)

# Load the MNIST data set (one-hot encoded labels).
mnist = input_data.read_data_sets(r'E:\python\mnist_data', one_hot=True)

# 100 images per batch.
batch_size = 100
# Number of batches per epoch.
n_batch = mnist.train.num_examples // batch_size

# Placeholders for the flattened 28x28 images and the one-hot labels.
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

# Rebuild exactly the same graph that was used for training:
# 784 input neurons -> 10 output neurons.
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
# softmax_cross_entropy_with_logits expects raw logits, so keep them
# separate from the softmax output.
logits = tf.matmul(x, W) + b
prediction = tf.nn.softmax(logits)

# Quadratic cost (kept for reference):
# loss = tf.reduce_mean(tf.square(y - prediction))
# Cross-entropy on the raw logits (feeding the softmax output as
# "logits" would apply softmax twice).
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

init = tf.global_variables_initializer()

# argmax returns the position of the largest value along axis 1.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(init)
    # Baseline accuracy with freshly initialised (all-zero) weights.
    print(sess.run(accuracy,
                   feed_dict={x: mnist.test.images, y: mnist.test.labels}))
    # Restore the trained weights, then evaluate again.
    saver.restore(sess, 'net/my_net.ckpt')
    print(sess.run(accuracy,
                   feed_dict={x: mnist.test.images, y: mnist.test.labels}))
結(jié)果是:
0.8241
高潮來了
上面的 restore 寫法需要把建圖代碼（即 with tf.Session() as sess: 之前的所有代碼）再寫一遍；下面這種寫法則不需要：
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file

# List every variable name and its shape stored in the checkpoint.
# (Original line had a syntax error: `print(variable 列肢，reader[variable])`.)
var_shapes = tf.train.NewCheckpointReader('net/my_net.ckpt').get_variable_to_shape_map()
for name in var_shapes:
    print(name, var_shapes[name])
print_tensors_in_checkpoint_file('net/my_net.ckpt', None, True, True)

# Load the MNIST data set (one-hot encoded labels).
mnist = input_data.read_data_sets(r'E:\python\mnist_data', one_hot=True)

# import_meta_graph rebuilds the whole graph from the .meta file, so none
# of the graph-construction code has to be repeated.
saver = tf.train.import_meta_graph('net/my_net.ckpt.meta')
with tf.Session() as sess:
    saver.restore(sess, 'net/my_net.ckpt')
    g = tf.get_default_graph()  # the imported graph
    # Inspect the operations to discover the auto-generated tensor names
    # (useful when tensors were never explicitly named).
    op = g.get_operations()
    # print(g, op)
    # NOTE(review): these names ('Softmax_5', 'Mean_1', ...) were read off
    # the op list above; the numeric suffixes depend on how many times the
    # graph was (re)built before saving -- verify against g.get_operations().
    sofmax = g.get_tensor_by_name('Softmax_5:0')
    x = g.get_tensor_by_name('Placeholder:0')
    accuracy = g.get_tensor_by_name('Mean_1:0')
    y = g.get_tensor_by_name('Placeholder_1:0')
    print(sofmax, x)
    print(sess.run(accuracy,
                   feed_dict={x: mnist.test.images, y: mnist.test.labels}))
結(jié)果是:
0.8241
你需要通過 op 列表找到 sofmax、x、accuracy、y 這些 tensor 的名稱，再用 tf.get_default_graph().get_tensor_by_name('Mean_1:0') 得到 tensor 實體。
這句saver = tf.train.import_meta_graph('net/my_net.ckpt.meta')直接加載圖透敌,這樣極大簡化了restore編寫盯滚。
當(dāng)然你在編寫代碼時給sofmax ,x酗电,accuracy魄藕,y取一個方便的名字會更加方便。
這句print_tensors_in_checkpoint_file('net/my_net.ckpt', None, True,True)是查看checkpoint的參數(shù)撵术,也就是weight背率。
還有一個save成一個文件的方法,保存為pb文件:
看了(公輸睚信)的博客改成了下面的格式
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.python.framework import graph_util

# Load the MNIST data set (one-hot encoded labels).
mnist = input_data.read_data_sets(r'E:\python\mnist_data', one_hot=True)

# 100 images per batch.
batch_size = 100
# Number of batches per epoch.
n_batch = mnist.train.num_examples // batch_size

# Placeholders for the flattened 28x28 images and the one-hot labels.
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

# A minimal network: 784 input neurons -> 10 output neurons.
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
# Keep raw logits separate: softmax_cross_entropy_with_logits expects
# UN-normalized logits, not softmax probabilities.
logits = tf.matmul(x, W) + b
prediction = tf.nn.softmax(logits)

# Quadratic cost (kept for reference):
# loss = tf.reduce_mean(tf.square(y - prediction))
# Cross-entropy on the raw logits (the original fed the softmax output as
# "logits", applying softmax twice).
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

init = tf.global_variables_initializer()

# argmax returns the position of the largest value along axis 1.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(1):
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})
        acc = sess.run(accuracy,
                       feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print("Iter " + str(epoch) + ",Testing Accuracy " + str(acc))
    # Freeze the model: fold variables into constants and keep only the
    # subgraph reachable from the listed output nodes.
    # NOTE(review): 'Mean' is the FIRST reduce_mean in this graph, i.e. the
    # loss; the accuracy node is 'Mean_1'. Verify which node the consumer of
    # the .pb file actually needs before relying on these names.
    graph_def = tf.get_default_graph().as_graph_def()
    output_graph_def = graph_util.convert_variables_to_constants(
        sess, graph_def, ['Softmax', 'Placeholder', 'Mean'])
    with tf.gfile.GFile('net/my_net.pb', 'wb') as fid:
        serialized_graph = output_graph_def.SerializeToString()
        fid.write(serialized_graph)
restore已經(jīng)保存的pb文件:
import os

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the MNIST data set (one-hot encoded labels).
mnist = input_data.read_data_sets(r'E:\python\mnist_data', one_hot=True)


def load_model(path_to_model):
    """Deserialize a frozen GraphDef (.pb file) into a new tf.Graph.

    Args:
        path_to_model: path of the frozen .pb file on disk.

    Returns:
        A tf.Graph containing the imported (frozen) graph.

    Raises:
        ValueError: if the file does not exist.
    """
    if not os.path.exists(path_to_model):
        # Original message hard-coded a wrong file name; report the real path.
        raise ValueError("model file does not exist: " + path_to_model)
    model_graph = tf.Graph()
    with model_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(path_to_model, 'rb') as fid:
            od_graph_def.ParseFromString(fid.read())
        # name='' keeps the original node names (no "import/" prefix).
        tf.import_graph_def(od_graph_def, name='')
    return model_graph


model_graph = load_model('net/my_net.pb')
with model_graph.as_default():
    with tf.Session(graph=model_graph) as sess:
        # Tensor names come from the output-node list used when freezing.
        sofmax = model_graph.get_tensor_by_name('Softmax:0')
        x = model_graph.get_tensor_by_name('Placeholder:0')
        # NOTE(review): 'Mean:0' is the first reduce_mean built during
        # training, i.e. the loss -- the accuracy node was 'Mean_1'.
        # Confirm this is the value you intend to report.
        accuracy = model_graph.get_tensor_by_name('Mean:0')
        y = model_graph.get_tensor_by_name('Placeholder_1:0')
        print(sess.run(accuracy,
                       feed_dict={x: mnist.test.images, y: mnist.test.labels}))