PyTorch Learning Notes: Deep Neural Networks (DNN)
Let's look at a few demos implemented recently.
The workflow is roughly the same each time:
- Generate the training and validation sets, and wrap them in DataLoaders
- Build the model; the two most important methods are __init__ and forward
- Train the model
- Validate the model

Today we implement an RNN (an LSTM, specifically), again on the MNIST dataset. Each 28x28 image is treated as a sequence of 28 time steps, one row of 28 pixels per step, which is why sequence_length and input_size are both 28 below.
```python
import torch
import torchvision
import torch.nn as nn
import torchvision.transforms as transforms

# Run on GPU if one is available, otherwise fall back to CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Hyperparameters: each 28x28 image becomes 28 time steps of 28 features
sequence_length = 28
input_size = 28
hidden_size = 128
num_layers = 2
num_classes = 10
batch_size = 100
num_epochs = 2
learning_rate = 0.01

# MNIST datasets and loaders
train_dataset = torchvision.datasets.MNIST(root='./data/', train=True, transform=transforms.ToTensor(), download=True)
test_dataset = torchvision.datasets.MNIST(root='./data/', train=False, transform=transforms.ToTensor())
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)
```
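As a quick sanity check (a sketch of my own, not part of the original demo): each batch from the loader arrives as [batch, 1, 28, 28], and the reshape used later in the training loop turns it into the [batch, sequence_length, input_size] layout the LSTM expects:

```python
# Illustrative only: inspect one batch and preview the reshape used in training
images, labels = next(iter(train_loader))
print(images.shape)                                           # torch.Size([100, 1, 28, 28])
print(images.reshape(-1, sequence_length, input_size).shape)  # torch.Size([100, 28, 28])
```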
```python
# Build the model
class RNN(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers, num_classes):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # batch_first=True so inputs are (batch, seq, feature)
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        # Initial hidden and cell states: (num_layers, batch, hidden_size)
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
        out, _ = self.lstm(x, (h0, c0))
        # Classify from the hidden state of the last time step
        out = self.fc(out[:, -1, :])
        return out

model = RNN(input_size, hidden_size, num_layers, num_classes).to(device)
```
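Before training, a dummy forward pass (again just an illustrative sketch) confirms that the model maps a batch of sequences to one score per class:

```python
# Shape check with a dummy batch: [batch, 28, 28] in, [batch, 10] class scores out
dummy = torch.zeros(4, sequence_length, input_size).to(device)
print(model(dummy).shape)  # torch.Size([4, 10])
```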
```python
# Loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Training
total_step = len(train_loader)
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # Flatten each image into a (sequence_length, input_size) sequence
        images = images.reshape(-1, sequence_length, input_size).to(device)
        labels = labels.to(device)

        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (i + 1) % 100 == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                  .format(epoch + 1, num_epochs, i + 1, total_step, loss.item()))
```
```python
# Test: no_grad() disables gradient tracking, saving memory during evaluation
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.reshape(-1, sequence_length, input_size).to(device)
        labels = labels.to(device)
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

    print('Test Accuracy of the model on the 10000 test images: {} %'.format(100 * correct / total))

# Save the model checkpoint
torch.save(model.state_dict(), 'RNNmodel.ckpt')
```
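To reuse the checkpoint later, the saved weights can be loaded back into a freshly constructed model; a minimal sketch, assuming the RNN class above is in scope:

```python
# Rebuild the model and restore the saved weights for inference
model2 = RNN(input_size, hidden_size, num_layers, num_classes).to(device)
model2.load_state_dict(torch.load('RNNmodel.ckpt'))
model2.eval()  # switch to evaluation mode before inference
```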
Running this over the past couple of days has been a bit flaky: with CUDA it occasionally throws an error.
Running torch.cuda.current_device()
sometimes reports that no device can be found…
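When CUDA misbehaves like this, a few built-in checks help narrow down whether PyTorch can see the GPU at all; a small diagnostic sketch:

```python
import torch

print(torch.cuda.is_available())      # False means PyTorch sees no usable GPU
print(torch.version.cuda)             # CUDA version this PyTorch build was compiled with
if torch.cuda.is_available():
    print(torch.cuda.device_count())      # number of visible GPUs
    print(torch.cuda.current_device())    # index of the current device
    print(torch.cuda.get_device_name(0))  # name of the first GPU
```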
```
Epoch [1/2], Step [100/600], Loss: 0.6143
Epoch [1/2], Step [200/600], Loss: 0.2176
Epoch [1/2], Step [300/600], Loss: 0.2322
Epoch [1/2], Step [400/600], Loss: 0.1555
Epoch [1/2], Step [500/600], Loss: 0.0651
Epoch [1/2], Step [600/600], Loss: 0.0269
Epoch [2/2], Step [100/600], Loss: 0.1197
Epoch [2/2], Step [200/600], Loss: 0.1387
Epoch [2/2], Step [300/600], Loss: 0.1049
Epoch [2/2], Step [400/600], Loss: 0.0847
Epoch [2/2], Step [500/600], Loss: 0.0719
Epoch [2/2], Step [600/600], Loss: 0.1006
Test Accuracy of the model on the 10000 test images: 97.37 %
```