MNIST Handwritten Digit Recognition with TensorFlow-GPU (Convolutional Neural Network, CNN) - Study Notes (1)
1. Building the Model
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from tensorflow.keras.layers import Dense, Flatten, Conv2D
from tensorflow.keras import Model

gpus = tf.config.experimental.list_physical_devices('GPU')  ## list the available GPUs
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)  ## grow GPU memory on demand instead of pre-allocating it all

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0  ## preprocessing: normalize pixel values to [0, 1]
x_train = x_train[..., tf.newaxis]  ## add a channel dimension: (28, 28) -> (28, 28, 1)
x_test = x_test[..., tf.newaxis]

train_set = tf.data.Dataset.from_tensor_slices((x_train, y_train)).shuffle(10000).batch(32)  ## shuffle the training set and split it into batches
test_set = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(32)
class MyModel(Model):  ## model definition via Keras subclassing
    def __init__(self):
        super(MyModel, self).__init__()
        self.conv1 = Conv2D(32, 3, activation='relu')
        self.flatten = Flatten()
        self.d1 = Dense(128, activation='relu')
        self.d2 = Dense(10, activation='softmax')

    def call(self, x):  ## forward pass; subclassed Keras models override call(), not __call__()
        x = self.conv1(x)
        x = self.flatten(x)
        x = self.d1(x)
        return self.d2(x)

mynetwork = MyModel()
loss_object = tf.keras.losses.SparseCategoricalCrossentropy()  ## loss function
optimizer = tf.keras.optimizers.Adam()  ## optimizer
train_loss = tf.keras.metrics.Mean(name='train_loss')  ## running mean of the training loss
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')  ## training accuracy
test_loss = tf.keras.metrics.Mean(name='test_loss')
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy')
@tf.function  ## training step
def train_step(images, labels):
    with tf.GradientTape() as tape:
        predictions = mynetwork(images)
        loss = loss_object(labels, predictions)
    gradients = tape.gradient(loss, mynetwork.trainable_variables)
    optimizer.apply_gradients(zip(gradients, mynetwork.trainable_variables))
    train_loss(loss)
    train_accuracy(labels, predictions)
@tf.function  ## evaluation step
def test_step(images, labels):
    predictions = mynetwork(images)
    t_loss = loss_object(labels, predictions)
    test_loss(t_loss)
    test_accuracy(labels, predictions)
for epoch in range(5):
    # reset the evaluation metrics at the start of each epoch
    train_loss.reset_states()
    train_accuracy.reset_states()
    test_loss.reset_states()
    test_accuracy.reset_states()

    for images, labels in train_set:
        train_step(images, labels)
    for test_images, test_labels in test_set:
        test_step(test_images, test_labels)

    template = 'Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}'
    print(template.format(epoch + 1,
                          train_loss.result(),
                          train_accuracy.result() * 100,
                          test_loss.result(),
                          test_accuracy.result() * 100))
二堤魁、預(yù)測(cè)結(jié)果
As the per-epoch output shows, the accuracy is already very high after 5 epochs.
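The trained model can also be used directly for prediction. A minimal sketch (assuming the training code above has already been run, so mynetwork, x_test, and y_test are in scope) that classifies the first few test images:

import numpy as np

# take the first 5 test images (already normalized and given a channel axis above)
sample_images = x_test[:5]
sample_labels = y_test[:5]

# the final Dense layer uses softmax, so each row of `probs` is a probability distribution over the 10 digits
probs = mynetwork(sample_images)
predicted_digits = np.argmax(probs, axis=1)

print('predicted:', predicted_digits)
print('actual:   ', sample_labels)

Each predicted digit should match the corresponding label for a well-trained model; mismatches point to the examples the network still gets wrong.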
Source: TensorFlow 2.0 quickstart for experts | TensorFlow Core