CIFAR10数据集来源:torchvision.datasets.CIFAR10()
CIFAR10数据集是一个用于识别普适物体的小型数据集,一共包含10个类别的RGB彩色图片,图片尺寸大小为32x32,如图:
相较于MNIST数据集,MNIST数据集是28x28的单通道灰度图,而CIFAR10数据集是32x32的RGB三通道彩色图,CIFAR10数据集更接近于真实世界的图片。
全连接的缺点有:
全连接参数过多,会导致训练量过大
全连接把图像展开成一个向量,丢失了图像原本的位置信息
全连接限制图像的尺寸,而卷积则不关心图像尺寸大小,只需要接受输入的通道数、输出的通道数和卷积核大小即可确定图像尺寸的变换过程,即
padding:对输入图片进行填充,一般用0填充,padding=1代表填充一圈,保证卷积前后的图像尺寸大小一致,padding计算公式如下:
stride步长:指的是卷积核每次滑动的距离大小
本文采用VGGNet16来构建深度网络模型
1. 数据集构建
每个像素点即每条数据中的值范围为0-255,有的数值过大不利于训练且难以收敛,故将其归一化到(0-1)之间
# Dataset preprocessing.
# transforms.RandomHorizontalFlip(p=0.5) --- flip the image horizontally with probability 0.5 (augmentation)
transform_train = transforms.Compose([transforms.RandomHorizontalFlip(p=0.5),
                                      transforms.ToTensor(),
                                      transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
# transforms.ToTensor() --- shape (H,W,C) -> (C,H,W); pixel values mapped from 0-255 to 0-1 (divide by 255)
# transforms.Normalize --- per-channel (x - mean) / std
# BUG FIX: mean and std were swapped here (and 0.229 was mistyped as 0.226);
# test-time normalization must match the training normalization exactly.
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))])
# CIFAR10 train/test splits; downloaded into `root` on first run (download=True).
train_dataset = datasets.CIFAR10(root="../DataSet/cifar10", train=True, transform=transform_train,
                                 download=True)
test_dataset = datasets.CIFAR10(root="../DataSet/cifar10", train=False, transform=transform,
                                download=True)
2. 用PyTorch提供的DataLoader来加载数据集
# dataset: the dataset to wrap  batch_size: mini-batch size  shuffle: reshuffle every epoch
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
# no shuffling for evaluation — order does not affect accuracy
test_loader = DataLoader(test_dataset, batch_size=64, shuffle=False)
3. 采用VGGNet16的神经网络来构建模型,输出10类的原始得分(CrossEntropyLoss内部已包含Softmax处理)
# 構(gòu)建 VGGNet16 網(wǎng)絡(luò)模型
class VGGNet16(nn.Module):
    """VGG16-style CNN for CIFAR10.

    Five convolutional stages (each ending in 2x2 max-pooling, so the 32x32
    input is reduced to 1x1 spatially with 512 channels) followed by a
    three-layer fully-connected classifier.

    Input:  (N, 3, 32, 32) RGB images.
    Output: (N, 10) raw class scores (logits — no softmax applied).
    """

    def __init__(self):
        super(VGGNet16, self).__init__()
        # Stage 1: 3 -> 64 channels, spatial 32x32 -> 16x16
        self.Conv1 = nn.Sequential(
            # CIFAR10 images are RGB, so the first conv takes 3 input channels
            nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(64),
            # inplace=True overwrites the incoming tensor, saving memory/VRAM
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            # pooling layer halves the spatial resolution
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        # Stage 2: 64 -> 128 channels, 16x16 -> 8x8
        self.Conv2 = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        # Stage 3: 128 -> 256 channels, 8x8 -> 4x4
        self.Conv3 = nn.Sequential(
            nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        # Stage 4: 256 -> 512 channels, 4x4 -> 2x2
        self.Conv4 = nn.Sequential(
            nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        # Stage 5: 512 -> 512 channels, 2x2 -> 1x1
        self.Conv5 = nn.Sequential(
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        # Fully-connected classifier head
        self.fc = nn.Sequential(
            nn.Linear(512, 256),
            nn.ReLU(inplace=True),
            # Dropout disables half of the units to mitigate overfitting
            nn.Dropout(0.5),
            nn.Linear(256, 128),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Linear(128, 10)
        )

    def forward(self, x):
        # five convolutional stages: (N, 3, 32, 32) -> (N, 512, 1, 1)
        x = self.Conv1(x)
        x = self.Conv2(x)
        x = self.Conv3(x)
        x = self.Conv4(x)
        x = self.Conv5(x)
        # Flatten per sample for the fully-connected head.
        # FIX: keep the batch dimension explicit via x.size(0) — the previous
        # view(-1, 512) would silently re-batch if the feature count mismatched,
        # whereas this raises a clear error instead.
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
4. 构建损失函数和优化器
损失函数采用CrossEntropyLoss
优化器采用SGD随机梯度优化算法
# Build the loss function and optimizer.
# CrossEntropyLoss combines LogSoftmax + NLLLoss, so the model outputs raw logits.
criterion = nn.CrossEntropyLoss()
opt = optim.SGD(model.parameters(), lr=0.01, momentum=0.8, weight_decay=0.001)
# Learning-rate schedule --- every `step_size` epochs: lr = lr * gamma
schedule = optim.lr_scheduler.StepLR(opt, step_size=10, gamma=0.6, last_epoch=-1)
5.完整代碼
# -*- codeing = utf-8 -*-
# @Software : PyCharm
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from matplotlib import pyplot as plt
import time
# Use the first GPU when available, otherwise fall back to CPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Training transforms: random horizontal flip (p=0.5) as augmentation,
# then ToTensor (HWC uint8 0-255 -> CHW float 0-1) and per-channel normalization.
transform_train = transforms.Compose([transforms.RandomHorizontalFlip(p=0.5),
                                      transforms.ToTensor(),
                                      transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
# Test transforms: no augmentation, same normalization as training.
# BUG FIX: mean and std were swapped here (and 0.229 was mistyped as 0.226);
# test-time normalization must match the training normalization exactly.
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))])
# CIFAR10 train/test splits; downloaded into `root` on first run (download=True).
train_dataset = datasets.CIFAR10(root="../DataSet/cifar10", train=True, transform=transform_train,
                                 download=True)
test_dataset = datasets.CIFAR10(root="../DataSet/cifar10", train=False, transform=transform,
                                download=True)
# Mini-batches of 64; shuffle the training set each epoch, keep test order fixed.
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=64, shuffle=False)
# 構(gòu)建 VGGNet16 網(wǎng)絡(luò)模型
class VGGNet16(nn.Module):
    """VGG16-style CNN for CIFAR10.

    Five convolutional stages (each ending in 2x2 max-pooling, so the 32x32
    input is reduced to 1x1 spatially with 512 channels) followed by a
    three-layer fully-connected classifier.

    Input:  (N, 3, 32, 32) RGB images.
    Output: (N, 10) raw class scores (logits — no softmax applied).
    """

    def __init__(self):
        super(VGGNet16, self).__init__()
        # Stage 1: 3 -> 64 channels, spatial 32x32 -> 16x16
        self.Conv1 = nn.Sequential(
            # CIFAR10 images are RGB, so the first conv takes 3 input channels
            nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(64),
            # inplace=True overwrites the incoming tensor, saving memory/VRAM
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            # pooling layer halves the spatial resolution
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        # Stage 2: 64 -> 128 channels, 16x16 -> 8x8
        self.Conv2 = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        # Stage 3: 128 -> 256 channels, 8x8 -> 4x4
        self.Conv3 = nn.Sequential(
            nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        # Stage 4: 256 -> 512 channels, 4x4 -> 2x2
        self.Conv4 = nn.Sequential(
            nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        # Stage 5: 512 -> 512 channels, 2x2 -> 1x1
        self.Conv5 = nn.Sequential(
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        # Fully-connected classifier head
        self.fc = nn.Sequential(
            nn.Linear(512, 256),
            nn.ReLU(inplace=True),
            # Dropout disables half of the units to mitigate overfitting
            nn.Dropout(0.5),
            nn.Linear(256, 128),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Linear(128, 10)
        )

    def forward(self, x):
        # five convolutional stages: (N, 3, 32, 32) -> (N, 512, 1, 1)
        x = self.Conv1(x)
        x = self.Conv2(x)
        x = self.Conv3(x)
        x = self.Conv4(x)
        x = self.Conv5(x)
        # Flatten per sample for the fully-connected head.
        # FIX: keep the batch dimension explicit via x.size(0) — the previous
        # view(-1, 512) would silently re-batch if the feature count mismatched,
        # whereas this raises a clear error instead.
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
# Instantiate the model and move it to the selected device (GPU when available)
model = VGGNet16().to(device)
# Build the loss function and optimizer.
# CrossEntropyLoss combines LogSoftmax + NLLLoss, so the model outputs raw logits.
criterion = nn.CrossEntropyLoss()
opt = optim.SGD(model.parameters(), lr=0.01, momentum=0.8, weight_decay=0.001)
# Learning-rate schedule --- every `step_size` epochs: lr = lr * gamma
schedule = optim.lr_scheduler.StepLR(opt, step_size=10, gamma=0.6, last_epoch=-1)
# Per-batch training losses, collected for the loss curve plotted after training
loss_list = []
# train
def train(epoch):
start = time.time()
for epoch in range(epoch):
running_loss = 0.0
for i, (inputs, labels) in enumerate(train_loader, 0):
inputs, labels = inputs.to(device), labels.to(device)
# 將數(shù)據(jù)送入模型訓(xùn)練
outputs = model(inputs)
# 計算損失
loss = criterion(outputs, labels).to(device)
# 重置梯度
opt.zero_grad()
# 計算梯度乞而,反向傳播
loss.backward()
# 根據(jù)反向傳播的梯度值優(yōu)化更新參數(shù)
opt.step()
running_loss += loss.item()
loss_list.append(loss.item())
# 每一百個 batch 查看一下 loss
if (i + 1) % 100 == 0:
print('epoch = %d , batch = %d , loss = %.6f' % (epoch + 1, i + 1, running_loss / 100))
running_loss = 0.0
# 每一輪結(jié)束輸出一下當(dāng)前的學(xué)習(xí)率 lr
lr_1 = opt.param_groups[0]['lr']
print("learn_rate:%.15f" % lr_1)
schedule.step()
end = time.time()
# 計算并打印輸出你的訓(xùn)練時間
print("time:{}".format(end - start))
# 訓(xùn)練過程可視化
plt.plot(loss_list)
plt.ylabel('loss')
plt.xlabel('Epoch')
plt.savefig('./train_img.png')
plt.show()
# Test
def verify():
    """Run one full pass over `test_loader` and print top-1 accuracy."""
    # inference mode: disables dropout, uses running batch-norm statistics
    model.eval()
    correct = 0.0
    total = 0
    # evaluation needs no gradients, so skip autograd bookkeeping entirely
    with torch.no_grad():
        print("=========================test=========================")
        for images, targets in test_loader:
            images = images.to(device)
            targets = targets.to(device)
            logits = model(images)
            # the predicted class is the index of the largest logit per row
            predictions = logits.argmax(dim=1)
            total += images.size(0)
            correct += (predictions == targets).sum().item()
        print("Accuracy of the network on the 10000 test images:%.2f %%" % (100 * correct / total))
        print("======================================================")
if __name__ == '__main__':
    # train for 100 epochs, then evaluate once on the test set
    train(100)
    verify()
# VGGNet: 所有卷积层全部使用3*3的卷积核, 两个3*3卷积的感受野相当于一个5*5, 同时可以减少参数量, 加深神经网络的深度
# 使用 VGGNet-16 的神经网络训练 CIFAR10 数据集的准确率在 82% 左右