This article draws on the highlights of many other blog posts, and is intended as a summary of the TF-Slim library.
Importing the slim module
import tensorflow as tf
import tensorflow.contrib.slim as slim
Defining slim variables
# Model variables
weights = slim.model_variable('weights',
                              shape=[10, 10, 3, 3],
                              initializer=tf.truncated_normal_initializer(stddev=0.1),
                              regularizer=slim.l2_regularizer(0.05),
                              device='/CPU:0')
model_variables = slim.get_model_variables()

# Regular variables
my_var = slim.variable('my_var',
                       shape=[20, 1],
                       initializer=tf.zeros_initializer())
regular_variables_and_model_variables = slim.get_variables()
Here, model_variable creates a variable that is treated as a model parameter and saved with the model, while variable creates an ordinary variable (e.g. bookkeeping state such as a global step) that is not treated as a model parameter.
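As a quick sanity check, here is a small sketch (using the weights and my_var defined above) of how the two query functions differ:

# weights was created with slim.model_variable, so it appears in the
# model-variables collection; my_var was created with slim.variable, so it
# only shows up among the regular variables.
model_var_names = [v.op.name for v in slim.get_model_variables()]
all_var_names = [v.op.name for v in slim.get_variables()]
assert 'weights' in model_var_names
assert 'my_var' not in model_var_names
assert 'my_var' in all_var_names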
Implementing a layer in slim
input = ...
net = slim.conv2d(input, 128, [3, 3], scope='conv1_1')
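For comparison, this is roughly what the same convolutional layer looks like in native TensorFlow (a sketch; the 64 input channels are an assumption):

# Plain TF 1.x: declare the kernel and bias by hand, then wire up the ops.
with tf.variable_scope('conv1_1') as scope:
    kernel = tf.Variable(tf.truncated_normal([3, 3, 64, 128], stddev=0.1),
                         name='weights')
    conv = tf.nn.conv2d(input, kernel, [1, 1, 1, 1], padding='SAME')
    biases = tf.Variable(tf.constant(0.0, shape=[128]), name='biases')
    bias = tf.nn.bias_add(conv, biases)
    conv1_1 = tf.nn.relu(bias, name=scope.name)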
# Code reuse with slim.repeat
net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
net = slim.max_pool2d(net, [2, 2], scope='pool2')
# Handling layers whose arguments differ: the verbose way
x = slim.fully_connected(x, 32, scope='fc/fc_1')
x = slim.fully_connected(x, 64, scope='fc/fc_2')
x = slim.fully_connected(x, 128, scope='fc/fc_3')
# or, equivalently, with slim.stack:
slim.stack(x, slim.fully_connected, [32, 64, 128], scope='fc')
# Verbose way:
x = slim.conv2d(x, 32, [3, 3], scope='core/core_1')
x = slim.conv2d(x, 32, [1, 1], scope='core/core_2')
x = slim.conv2d(x, 64, [3, 3], scope='core/core_3')
x = slim.conv2d(x, 64, [1, 1], scope='core/core_4')
# Concise way with slim.stack:
slim.stack(x, slim.conv2d, [(32, [3, 3]), (32, [1, 1]), (64, [3, 3]), (64, [1, 1])], scope='core')
TF-Slim provides standard implementations for many common layers:
Layer | TF-Slim
---|---
BiasAdd | slim.bias_add
BatchNorm | slim.batch_norm
Conv2d | slim.conv2d
Conv2dInPlane | slim.conv2d_in_plane
Conv2dTranspose (Deconv) | slim.conv2d_transpose
FullyConnected | slim.fully_connected
AvgPool2D | slim.avg_pool2d
Dropout | slim.dropout
Flatten | slim.flatten
MaxPool2D | slim.max_pool2d
OneHotEncoding | slim.one_hot_encoding
SeparableConv2d | slim.separable_conv2d
UnitNorm | slim.unit_norm
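A minimal sketch chaining several of the ops above (the inputs tensor and the is_training flag are assumed to be defined elsewhere):

# conv -> pool -> flatten -> fully connected -> dropout
net = slim.conv2d(inputs, 64, [3, 3], scope='conv1')
net = slim.max_pool2d(net, [2, 2], scope='pool1')
net = slim.flatten(net, scope='flatten')
net = slim.fully_connected(net, 256, scope='fc1')
net = slim.dropout(net, 0.5, is_training=is_training, scope='dropout1')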
Simplifying repeated arguments with arg_scope
with slim.arg_scope([slim.conv2d], padding='SAME',
                    weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
                    weights_regularizer=slim.l2_regularizer(0.0005)):
    net = slim.conv2d(inputs, 64, [11, 11], scope='conv1')
    net = slim.conv2d(net, 128, [11, 11], padding='VALID', scope='conv2')
    net = slim.conv2d(net, 256, [11, 11], scope='conv3')
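For contrast, without arg_scope each call would have to spell out the shared arguments; a sketch of just the first layer:

# Every conv2d call repeats the padding, initializer, and regularizer.
net = slim.conv2d(inputs, 64, [11, 11], padding='SAME',
                  weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
                  weights_regularizer=slim.l2_regularizer(0.0005),
                  scope='conv1')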
# Nesting arg_scopes
with slim.arg_scope([slim.conv2d, slim.fully_connected],
                    activation_fn=tf.nn.relu,
                    weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
                    weights_regularizer=slim.l2_regularizer(0.0005)):
    with slim.arg_scope([slim.conv2d], stride=1, padding='SAME'):
        net = slim.conv2d(inputs, 64, [11, 11], 4, padding='VALID', scope='conv1')
        net = slim.conv2d(net, 256, [5, 5],
                          weights_initializer=tf.truncated_normal_initializer(stddev=0.03),
                          scope='conv2')
        net = slim.fully_connected(net, 1000, activation_fn=None, scope='fc')
Training a model: defining losses
loss = slim.losses.softmax_cross_entropy(predictions, labels)
# Custom losses
# Define the loss functions and get the total loss.
classification_loss = slim.losses.softmax_cross_entropy(scene_predictions, scene_labels)
sum_of_squares_loss = slim.losses.sum_of_squares(depth_predictions, depth_labels)
pose_loss = MyCustomLossFunction(pose_predictions, pose_labels)
slim.losses.add_loss(pose_loss) # Letting TF-Slim know about the additional loss.
# The following two ways to compute the total loss are equivalent:
regularization_loss = tf.add_n(slim.losses.get_regularization_losses())
total_loss1 = classification_loss + sum_of_squares_loss + pose_loss + regularization_loss
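The second route is to let TF-Slim sum all the losses it has been told about; regularization losses are included in the total by default:

total_loss2 = slim.losses.get_total_loss(add_regularization_losses=True)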
Saving and restoring models with slim
# Create some variables.
v1 = slim.variable(name="v1", ...)
v2 = slim.variable(name="nested/v2", ...)
...
# Get the list of variables to restore (which contains only 'v2').
variables_to_restore = slim.get_variables_by_name("v2")
# Create the saver which will be used to restore the variables.
restorer = tf.train.Saver(variables_to_restore)
with tf.Session() as sess:
    # Restore variables from disk.
    restorer.restore(sess, "/tmp/model.ckpt")
    print("Model restored.")
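slim also provides other selection helpers; each of these calls picks out the same 'v2' variable:

# Select by name suffix:
variables_to_restore = slim.get_variables_by_suffix("2")
# or by scope:
variables_to_restore = slim.get_variables(scope="nested")
# or by exclusion:
variables_to_restore = slim.get_variables_to_restore(exclude=["v1"])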
# Adding a prefix to variable names when restoring
Suppose the variables defined in our network are named conv1/weights, while the corresponding variables in the VGG checkpoint are named vgg16/conv1/weights. A normal load would fail (variable names not found), but we can remap the names like this:
def name_in_checkpoint(var):
    return 'vgg16/' + var.op.name

variables_to_restore = slim.get_model_variables()
variables_to_restore = {name_in_checkpoint(var): var for var in variables_to_restore}
restorer = tf.train.Saver(variables_to_restore)
with tf.Session() as sess:
    # Restore variables from disk.
    restorer.restore(sess, "/tmp/model.ckpt")
Training a model: the training loop
In this example, slim.learning.train computes the loss and applies the gradient steps according to train_op. logdir specifies the directory where checkpoints and event files are stored. The number of gradient steps can be capped at any value; here we use 1000 steps. Finally, save_summaries_secs=300 computes summaries every 5 minutes, and save_interval_secs=600 saves a model checkpoint every 10 minutes.
g = tf.Graph()
# Create the model and specify the losses...
...
total_loss = slim.losses.get_total_loss()
optimizer = tf.train.GradientDescentOptimizer(learning_rate)

# create_train_op ensures that each time we ask for the loss, the update_ops
# are run and the gradients being computed are applied too.
train_op = slim.learning.create_train_op(total_loss, optimizer)
logdir = ...  # Where checkpoints are stored.

slim.learning.train(
    train_op,
    logdir,
    number_of_steps=1000,
    save_summaries_secs=300,
    save_interval_secs=600)
Fine-Tuning a Model on a different task
Suppose we have a pre-trained VGG16 model that was trained on the 1000-class ImageNet dataset, and we now want to apply it to the Pascal VOC dataset, which has only 20 classes. To do so, we initialize the new model with the pre-trained values of all layers except the final fully connected ones:
# Load the Pascal VOC data
image, label = MyPascalVocDataLoader(...)
images, labels = tf.train.batch([image, label], batch_size=32)
# Create the model
predictions = vgg.vgg_16(images)
train_op = slim.learning.create_train_op(...)
# Specify where the Model, trained on ImageNet, was saved.
model_path = '/path/to/pre_trained_on_imagenet.checkpoint'
# Specify where the new model will live:
log_dir = '/path/to/my_pascal_model_dir/'
# Restore only the convolutional layers:
variables_to_restore = slim.get_variables_to_restore(exclude=['fc6', 'fc7', 'fc8'])
init_fn = slim.assign_from_checkpoint_fn(model_path, variables_to_restore)
# Start training.
slim.learning.train(train_op, log_dir, init_fn=init_fn)
Evaluation loop
import math

import tensorflow as tf

slim = tf.contrib.slim
# Load the data
images, labels = load_data(...)
# Define the network
predictions = MyModel(images)
# Choose the metrics to compute:
names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
    'accuracy': slim.metrics.accuracy(predictions, labels),
    'precision': slim.metrics.precision(predictions, labels),
    'recall': slim.metrics.recall(predictions, labels),
})
# Create the summary ops such that they also print out to std output:
summary_ops = []
for metric_name, metric_value in names_to_values.items():
    op = tf.summary.scalar(metric_name, metric_value)
    op = tf.Print(op, [metric_value], metric_name)
    summary_ops.append(op)
num_examples = 10000
batch_size = 32
num_batches = math.ceil(num_examples / float(batch_size))
# Setup the global step.
slim.get_or_create_global_step()
checkpoint_dir = ...  # Where the model checkpoints live.
log_dir = ...  # Where the summaries are stored.
eval_interval_secs = ...  # How often to run the evaluation.
slim.evaluation.evaluation_loop(
    'local',
    checkpoint_dir,
    log_dir,
    num_evals=num_batches,
    eval_op=list(names_to_updates.values()),
    summary_op=tf.summary.merge(summary_ops),
    eval_interval_secs=eval_interval_secs)
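For a one-off evaluation of a single checkpoint instead of a periodic loop, slim also offers evaluate_once; a minimal sketch reusing the names above (checkpoint_path is an assumed path to one concrete checkpoint):

# Evaluate one specific checkpoint exactly once.
slim.evaluation.evaluate_once(
    'local',
    checkpoint_path,
    log_dir,
    num_evals=num_batches,
    eval_op=list(names_to_updates.values()),
    summary_op=tf.summary.merge(summary_ops))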