While debugging the handwritten digit recognition code, I ran into a problem: after converting the CPU code to train on the GPU, training itself still worked (see the previous post, "Mnist手寫(xiě)數(shù)字識(shí)別cpu訓(xùn)練與gpu訓(xùn)練"), but an error appeared. Below is how I tracked it down and fixed it.
一儒拂、調(diào)整前代碼&調(diào)整后代碼
1. Before
import torch
from torchvision import datasets, transforms
import torch.nn as nn
import torch.optim as optim
from datetime import datetime

# added: use the GPU if one is available, otherwise fall back to the CPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


class Config:
    batch_size = 64
    epoch = 10
    momentum = 0.9
    alpha = 1e-3
    print_per_step = 100
class LeNet(nn.Module):
    def __init__(self):
        super(LeNet, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 32, 3, 1, 2),  # 3x3 kernel, stride 1, padding 2: 1x28x28 -> 32x30x30
            nn.ReLU(),
            nn.MaxPool2d(2, 2)          # 2x2 max pooling: 32x30x30 -> 32x15x15
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(32, 64, 5),       # 5x5 kernel, no padding: 32x15x15 -> 64x11x11
            nn.ReLU(),
            nn.MaxPool2d(2, 2)          # 2x2 max pooling: 64x11x11 -> 64x5x5
        )
        self.fc1 = nn.Sequential(
            nn.Linear(64 * 5 * 5, 128),
            nn.BatchNorm1d(128),
            nn.ReLU()
        )
        self.fc2 = nn.Sequential(
            nn.Linear(128, 64),
            nn.BatchNorm1d(64),  # batch norm speeds up convergence; it usually sits after the linear layer and before the activation
            nn.ReLU()
        )
        self.fc3 = nn.Linear(64, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = x.view(x.size()[0], -1)  # flatten to (batch_size, 64*5*5)
        x = self.fc1(x)
        x = self.fc2(x)
        x = self.fc3(x)
        return x
class TrainProcess:
    def __init__(self):
        self.train, self.test = self.load_data()
        # changed: move the model onto the selected device
        self.net = LeNet().to(device)
        self.criterion = nn.CrossEntropyLoss()  # loss function
        self.optimizer = optim.SGD(self.net.parameters(), lr=Config.alpha, momentum=Config.momentum)

    @staticmethod
    def load_data():
        """Load the MNIST dataset; it is downloaded automatically if not found locally."""
        print("Loading Data......")
        train_data = datasets.MNIST(root='./data/',
                                    train=True,
                                    transform=transforms.ToTensor(),
                                    download=True)
        test_data = datasets.MNIST(root='./data/',
                                   train=False,
                                   transform=transforms.ToTensor())
        # DataLoader returns an iterator over mini-batches
        # shuffle: whether to reshuffle the samples each epoch
        train_loader = torch.utils.data.DataLoader(dataset=train_data,
                                                   batch_size=Config.batch_size,
                                                   shuffle=True)
        test_loader = torch.utils.data.DataLoader(dataset=test_data,
                                                  batch_size=Config.batch_size,
                                                  shuffle=False)
        return train_loader, test_loader
    def train_step(self):
        steps = 0
        start_time = datetime.now()
        print("Training & Evaluating......")
        for epoch in range(Config.epoch):
            print("Epoch {:3}".format(epoch + 1))
            for data, label in self.train:
                # changed: move each batch to the same device as the model
                data, label = data.to(device), label.to(device)
                self.optimizer.zero_grad()             # zero the gradients
                outputs = self.net(data)               # forward pass
                loss = self.criterion(outputs, label)  # compute the loss
                loss.backward()                        # backpropagation
                self.optimizer.step()                  # update the parameters
                # print progress every 100 steps
                if steps % Config.print_per_step == 0:
                    _, predicted = torch.max(outputs, 1)
                    correct = int(sum(predicted == label))
                    accuracy = correct / Config.batch_size  # batch accuracy (last batch may be smaller)
                    end_time = datetime.now()
                    time_diff = (end_time - start_time).seconds
                    time_usage = '{:3}m{:3}s'.format(int(time_diff / 60), time_diff % 60)
                    msg = "Step {:5}, Loss:{:6.2f}, Accuracy:{:8.2%}, Time usage:{:9}."
                    print(msg.format(steps, loss, accuracy, time_usage))
                steps += 1

        test_loss = 0.
        test_correct = 0
        for data, label in self.test:
            # changed: move each test batch to the same device as the model
            data, label = data.to(device), label.to(device)
            outputs = self.net(data)
            loss = self.criterion(outputs, label)
            test_loss += loss * Config.batch_size
            _, predicted = torch.max(outputs, 1)
            correct = int(sum(predicted == label))
            test_correct += correct
        accuracy = test_correct / len(self.test.dataset)
        loss = test_loss / len(self.test.dataset)
        print("Test Loss: {:5.2f}, Accuracy: {:6.2%}".format(loss, accuracy))
        end_time = datetime.now()
        time_diff = (end_time - start_time).seconds
        print("Time Usage: {:5.2f} mins.".format(time_diff / 60.))


if __name__ == "__main__":
    p = TrainProcess()
    p.train_step()
Error: RuntimeError: CUDA out of memory. Tried to allocate 20.00 MiB (GPU 0; 4.00 GiB total capacity; 2.44 GiB already allocated; 0 bytes free; 2.45 GiB reserved in total by PyTorch)
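To confirm that the failure really is GPU memory exhaustion, and to see how much memory PyTorch is holding at a given point, the allocator statistics can be printed before and after a forward pass. This is a small diagnostic helper of my own, not part of the original code; it assumes the model runs on GPU 0:

import torch

def report_gpu_memory(tag=""):
    """Print how much GPU memory PyTorch has allocated and is reserving."""
    if not torch.cuda.is_available():
        print("CUDA is not available")
        return
    allocated = torch.cuda.memory_allocated() / 1024 ** 2                 # MiB used by live tensors
    reserved = torch.cuda.memory_reserved() / 1024 ** 2                   # MiB held by the caching allocator
    total = torch.cuda.get_device_properties(0).total_memory / 1024 ** 2  # MiB on the card
    print("[{}] allocated: {:.1f} MiB, reserved: {:.1f} MiB, total: {:.1f} MiB".format(
        tag, allocated, reserved, total))

# e.g. call report_gpu_memory("after forward") right after outputs = self.net(data)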
2. After

import torch
from torchvision import datasets, transforms
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable  # added here, but not actually used below
from datetime import datetime

# added: use the GPU if one is available, otherwise fall back to the CPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


class Config:
    batch_size = 64
    epoch = 10
    momentum = 0.9
    alpha = 1e-3
    print_per_step = 100
class LeNet(nn.Module):
    def __init__(self):
        super(LeNet, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 32, 3, 1, 2),  # 3x3 kernel, stride 1, padding 2: 1x28x28 -> 32x30x30
            nn.ReLU(),
            nn.MaxPool2d(2, 2)          # 2x2 max pooling: 32x30x30 -> 32x15x15
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(32, 64, 5),       # 5x5 kernel, no padding: 32x15x15 -> 64x11x11
            nn.ReLU(),
            nn.MaxPool2d(2, 2)          # 2x2 max pooling: 64x11x11 -> 64x5x5
        )
        self.fc1 = nn.Sequential(
            nn.Linear(64 * 5 * 5, 128),
            nn.BatchNorm1d(128),
            nn.ReLU()
        )
        self.fc2 = nn.Sequential(
            nn.Linear(128, 64),
            nn.BatchNorm1d(64),  # batch norm speeds up convergence; it usually sits after the linear layer and before the activation
            nn.ReLU()
        )
        self.fc3 = nn.Linear(64, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = x.view(x.size()[0], -1)  # flatten to (batch_size, 64*5*5)
        x = self.fc1(x)
        x = self.fc2(x)
        x = self.fc3(x)
        return x
class TrainProcess:
    def __init__(self):
        self.train, self.test = self.load_data()
        # changed: move the model onto the selected device
        self.net = LeNet().to(device)
        self.criterion = nn.CrossEntropyLoss()  # loss function
        self.optimizer = optim.SGD(self.net.parameters(), lr=Config.alpha, momentum=Config.momentum)

    @staticmethod
    def load_data():
        """Load the MNIST dataset; it is downloaded automatically if not found locally."""
        print("Loading Data......")
        train_data = datasets.MNIST(root='./data/',
                                    train=True,
                                    transform=transforms.ToTensor(),
                                    download=True)
        test_data = datasets.MNIST(root='./data/',
                                   train=False,
                                   transform=transforms.ToTensor())
        # DataLoader returns an iterator over mini-batches
        # shuffle: whether to reshuffle the samples each epoch
        train_loader = torch.utils.data.DataLoader(dataset=train_data,
                                                   batch_size=Config.batch_size,
                                                   shuffle=True)
        test_loader = torch.utils.data.DataLoader(dataset=test_data,
                                                  batch_size=Config.batch_size,
                                                  shuffle=False)
        return train_loader, test_loader
    def train_step(self):
        steps = 0
        start_time = datetime.now()
        print("Training & Evaluating......")
        for epoch in range(Config.epoch):
            print("Epoch {:3}".format(epoch + 1))
            for data, label in self.train:
                # changed: move each batch to the same device as the model
                data, label = data.to(device), label.to(device)
                self.optimizer.zero_grad()             # zero the gradients
                outputs = self.net(data)               # forward pass
                loss = self.criterion(outputs, label)  # compute the loss
                loss.backward()                        # backpropagation
                self.optimizer.step()                  # update the parameters
                # print progress every 100 steps
                if steps % Config.print_per_step == 0:
                    _, predicted = torch.max(outputs, 1)
                    correct = int(sum(predicted == label))
                    accuracy = correct / Config.batch_size  # batch accuracy (last batch may be smaller)
                    end_time = datetime.now()
                    time_diff = (end_time - start_time).seconds
                    time_usage = '{:3}m{:3}s'.format(int(time_diff / 60), time_diff % 60)
                    msg = "Step {:5}, Loss:{:6.2f}, Accuracy:{:8.2%}, Time usage:{:9}."
                    print(msg.format(steps, loss, accuracy, time_usage))
                steps += 1

        test_loss = 0.
        test_correct = 0
        for data, label in self.test:
            # changed: evaluate without building the autograd graph
            with torch.no_grad():
                data, label = data.to(device), label.to(device)
                outputs = self.net(data)
                loss = self.criterion(outputs, label)
                test_loss += loss * Config.batch_size
                _, predicted = torch.max(outputs, 1)
                correct = int(sum(predicted == label))
                test_correct += correct
        accuracy = test_correct / len(self.test.dataset)
        loss = test_loss / len(self.test.dataset)
        print("Test Loss: {:5.2f}, Accuracy: {:6.2%}".format(loss, accuracy))
        end_time = datetime.now()
        time_diff = (end_time - start_time).seconds
        print("Time Usage: {:5.2f} mins.".format(time_diff / 60.))


if __name__ == "__main__":
    print(device)
    p = TrainProcess()
    p.train_step()
Result: with this change in place, the error no longer appears and training and evaluation run to completion. The reason is that nothing inside the with torch.no_grad(): block builds an autograd graph, so the activations of the test batches (and the graph that would otherwise hang off the accumulated test_loss) are no longer kept alive in GPU memory.
二咏雌、解決方法
Method 1: Reduce the batch_size
Most posts found online suggest reducing batch_size, but in my case this alone did not fix the problem. The change itself is shown in the sketch below.
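If you want to try this first, it is a one-line change in the configuration class used above; the value 32 is only an example, not a recommendation:

class Config:
    batch_size = 32      # was 64; smaller batches need less GPU memory per forward/backward pass
    epoch = 10
    momentum = 0.9
    alpha = 1e-3
    print_per_step = 100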
Method 2: Do not compute gradients during evaluation
Wrap the evaluation code in with torch.no_grad(): (a stand-alone sketch of the pattern follows below).
A related post by another blogger: pytorch運(yùn)行錯(cuò)誤:CUDA out of memory.
Note: method 2 is what solved the problem in this article.
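As a general pattern (kept separate from the class-based code above), a minimal evaluation loop that avoids building the autograd graph might look like the following sketch; model, test_loader, criterion and device are assumed to already exist:

import torch

def evaluate(model, test_loader, criterion, device):
    model.eval()                       # put BatchNorm/Dropout layers into inference mode
    total_loss, total_correct, total = 0.0, 0, 0
    with torch.no_grad():              # no graph is built, so activations are freed right away
        for data, label in test_loader:
            data, label = data.to(device), label.to(device)
            outputs = model(data)
            total_loss += criterion(outputs, label).item() * data.size(0)
            total_correct += (outputs.argmax(dim=1) == label).sum().item()
            total += data.size(0)
    model.train()                      # restore training mode
    return total_loss / total, total_correct / total

Calling model.eval() is not strictly part of the memory fix, but it makes the BatchNorm layers in LeNet use their running statistics during evaluation instead of per-batch statistics.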
Method 3: Free unused memory
Add the following before the line that raises the error, to release cached memory that is no longer in use:

if hasattr(torch.cuda, 'empty_cache'):
    torch.cuda.empty_cache()

Reference post: 解決:RuntimeError: CUDA out of memory. Tried to allocate 2.00 MiB
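Note that torch.cuda.empty_cache() only returns blocks that the caching allocator holds but no tensor is using; tensors that are still referenced have to be released first. A small self-contained sketch of that pattern (the tensor here is just a stand-in for whatever large intermediate is no longer needed):

import torch

if torch.cuda.is_available():
    x = torch.randn(4096, 4096, device="cuda")    # roughly 64 MiB of float32 data
    print("reserved:", torch.cuda.memory_reserved() // 2**20, "MiB")
    del x                             # drop the reference so the allocator can reclaim the block
    torch.cuda.empty_cache()          # hand the cached, now-unused blocks back to the driver
    print("reserved:", torch.cuda.memory_reserved() // 2**20, "MiB")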