https://blog.csdn.net/sinat_42239797/article/details/90641659
#***************************一些必要的包的調(diào)用********************************
import torch.nn.functional as F
import torch
import torch
import torch.nn as nn
from torch.autograd import Variable
import torchvision.models as models
from torchvision import transforms, utils
from torch.utils.data import Dataset, DataLoader
from PIL import Image
import numpy as np
import torch.optim as optim
import os
#*************************** initialize some settings ********************************
#torch.cuda.set_device(gpu_id)  # uncomment to pin a specific GPU
learning_rate = 0.0001  # learning rate (NOTE(review): the optimizer below uses lr=0.001, not this)
#************************************* dataset setup ****************************************************************************
# BUG FIX: was os.getcwd() + "\\" — a hard-coded Windows separator that
# breaks on Linux/macOS; os.sep is the platform-correct separator.
root = os.getcwd() + os.sep  # directory holding the txt index files
#定義讀取文件的格式
def default_loader(path):
    """Open the image at `path` and return it converted to RGB.

    BUG FIX: the original left the file handle from Image.open dangling
    (PIL opens lazily and only releases on GC). The context manager closes
    it eagerly; convert() forces the pixel data to load before the close.
    """
    with Image.open(path) as img:
        return img.convert('RGB')
class MyDataset(Dataset):
    """Image dataset driven by a plain-text index file.

    Each non-blank line of the index file is ``<image_path> <int_label>``
    (whitespace-separated), as produced by the generation script below.
    """

    def __init__(self, txt, transform=None, target_transform=None, loader=default_loader):
        """Read the index file `txt` and remember the (path, label) pairs.

        :param txt: path to the index file
        :param transform: optional callable applied to each loaded image
        :param target_transform: optional callable for labels (currently unused
            in __getitem__ — kept for interface compatibility)
        :param loader: callable that maps a path to an image object
        """
        super(MyDataset, self).__init__()
        imgs = []
        # BUG FIX: the original opened the file without ever closing it and
        # called both strip('\n') and rstrip('\n') redundantly.
        with open(txt, 'r') as fh:
            for line in fh:
                line = line.strip()
                if not line:
                    continue  # robustness: a trailing blank line used to raise IndexError
                words = line.split()  # default split: any whitespace
                # words[0] is the image path, words[1] the integer label
                imgs.append((words[0], int(words[1])))
        self.imgs = imgs
        self.transform = transform
        self.target_transform = target_transform
        self.loader = loader

    def __getitem__(self, index):
        """Load and return the (image, label) pair at `index`."""
        fn, label = self.imgs[index]  # fn: image path, label: int class id
        img = self.loader(fn)
        if self.transform is not None:
            img = self.transform(img)  # e.g. transforms.ToTensor()
        return img, label

    def __len__(self):
        """Number of samples (lines in the index file), not batches."""
        return len(self.imgs)
# Build the datasets from the index files written by the script below.
# NOTE(review): the test index really is named 'text.txt' (that is what the
# generation script writes), not 'test.txt'.
train_data = MyDataset(txt=root + 'train.txt', transform=transforms.ToTensor())
test_data = MyDataset(txt=root + 'text.txt', transform=transforms.ToTensor())
# A DataLoader's length is the number of BATCHES, so it depends on batch_size.
train_loader = DataLoader(dataset=train_data, batch_size=32, shuffle=True, num_workers=0)
test_loader = DataLoader(dataset=test_data, batch_size=32, shuffle=False, num_workers=0)
# BUG FIX: these prints claimed to show sample counts but printed
# len(*_loader), which is the batch count; report the dataset sizes instead.
print('num_of_trainData:', len(train_data))
print('num_of_testData:', len(test_data))
要使用上面的代碼,首先要制作供調(diào)用的txt文件府喳,我的文件夾里面的內(nèi)容是這樣的
我的文件夾.JPG
train里面的數(shù)據(jù)如下蒲肋,一共10類,10個數(shù)字钝满,我預(yù)先resize成224x224
train文件夾.JPG
用的制作txt的代碼如下
import os


def _write_index(img_dir, out_path):
    """Append one '<full_path> <label>' line per file in `img_dir` to `out_path`.

    The label is taken from the file name: the part after the last '_' and
    before the extension (e.g. 'img_003_7.jpg' -> '7').
    NOTE(review): 'a' (append) mode is kept from the original, so rerunning
    the script appends duplicate lines — delete the txt files first.
    """
    # os.listdir order is arbitrary; sort lexically for a stable index.
    names = sorted(os.listdir(img_dir))
    # BUG FIX: files were opened without `with` (never closed), the variable
    # `dir` shadowed the builtin, and this whole body was pasted three times.
    with open(out_path, 'a') as out:
        for name in names:
            label = name.split('.')[0].split('_')[-1]
            out.write(img_dir + name + ' ' + label + '\n')


_write_index('D:/cv/TEST/alexnet/test/', './text.txt')
_write_index('D:/cv/TEST/alexnet/train/', './train.txt')
_write_index('D:/cv/TEST/alexnet/vaild/', './vaild.txt')
用的是官網(wǎng)的模型,沒有想到pytorch做模型這么簡單
https://pytorch.org/docs/master/_modules/torchvision/models/alexnet.html#alexnet
class AlexNet(nn.Module):
    """AlexNet, mirroring the torchvision reference implementation.

    Layer ordering (and therefore state_dict keys such as
    'features.0.weight' / 'classifier.6.bias') matches torchvision exactly.
    Expects 3-channel input; AdaptiveAvgPool2d makes it tolerant of input
    sizes other than 224x224.
    """

    def __init__(self, num_classes=1000):
        super(AlexNet, self).__init__()
        # Convolutional trunk; assembled as a list first, then unpacked so
        # the Sequential indices stay identical to the reference model.
        trunk = [
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        ]
        self.features = nn.Sequential(*trunk)
        # Pool to a fixed 6x6 spatial size regardless of input resolution.
        self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
        # Fully connected head: two dropout-regularized hidden layers.
        head = [
            nn.Dropout(),
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        ]
        self.classifier = nn.Sequential(*head)

    def forward(self, x):
        """Return raw class logits of shape (batch, num_classes)."""
        feats = self.features(x)
        pooled = self.avgpool(feats)
        flat = torch.flatten(pooled, 1)
        return self.classifier(flat)
# Instantiate the network, loss, and optimizer.
# BUG FIX: was net.cuda(), which crashes on CPU-only machines; fall back
# to CPU when no GPU is available.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net = AlexNet()
net.to(device)
criterion = nn.CrossEntropyLoss()
# NOTE(review): learning_rate = 0.0001 is defined near the top of the file
# but lr=0.001 is hard-coded here — one of the two is probably unintended.
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
"""
###畫圖
import matplotlib.pyplot as plt
import numpy as np
import torchvision
# functions to show an image
def imshow(img):
img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.show()
# get some random training images
dataiter = iter(train_loader)
images, labels = dataiter.next()
# show images
print(images[0])
imshow(torchvision.utils.make_grid(images[0]))
# print labels
print(' '.join('%5s' % labels[j] for j in range(32)))
###畫圖結(jié)束
"""
# Train the network for 30 epochs.
# Move batches to wherever the model already lives (CPU or GPU) instead of
# hard-coding .cuda(), which crashed on CPU-only machines.
device = next(net.parameters()).device
for epoch in range(30):
    running_loss = 0.0
    for i, data in enumerate(train_loader, 0):
        inputs, labels = data
        inputs, labels = inputs.to(device), labels.to(device)
        # zero the parameter gradients accumulated by the previous step
        optimizer.zero_grad()
        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        # running statistics; .item() extracts the Python float
        running_loss += loss.item()
        if i % 50 == 49:
            # BUG FIX: the average was running_loss / 5 although the loss is
            # accumulated over 50 batches, over-reporting it by 10x.
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / 50))
            running_loss = 0.0
print('Finished Training')
# Saves the whole module object (pickle); loading it back requires the
# AlexNet class definition to be importable.
torch.save(net, 'model.pth')
def evaluteTop1(model, loader):
    """Return the top-1 accuracy of `model` over `loader` as a float in [0, 1].

    Side effect: puts `model` into eval mode (dropout disabled) and leaves
    it there, matching the original behavior.

    :param model: a module with at least one parameter (used to pick the device)
    :param loader: a DataLoader yielding (inputs, labels); its .dataset length
        is used as the denominator
    """
    model.eval()
    # BUG FIX: was x.cuda()/y.cuda(), crashing on CPU-only machines; follow
    # the model's own device instead.
    device = next(model.parameters()).device
    total = len(loader.dataset)
    if total == 0:
        return 0.0  # robustness: avoid ZeroDivisionError on an empty dataset
    correct = 0
    # no_grad now wraps the whole loop, not just the forward call, so no
    # autograd state is built for predictions either.
    with torch.no_grad():
        for x, y in loader:
            x, y = x.to(device), y.to(device)
            pred = model(x).argmax(dim=1)
            correct += torch.eq(pred, y).sum().item()
    return correct / total
# Load the trained network saved above and report its test accuracy.
# (Dropped the dead `model = AlexNet()` — it was overwritten immediately;
# torch.load returns the full pickled module.)
# NOTE(review): torch.load on a pickled module executes arbitrary code;
# only load checkpoints you trust (a state_dict checkpoint would be safer).
model = torch.load('model.pth')
print(evaluteTop1(model, test_loader))
"""
###錯誤的圖片帽氓,要上面的imshow代碼
#net.eval()
eval_loss = 0.
eval_acc = 0.
errorimg = []
for i, data in enumerate(test_loader, 0):
inputs, labels = data
outputs = model(inputs.cuda())
pred = torch.max(outputs, 1)[1]
for i in range(len(labels)):
if labels[i] != pred[i]:
errorimg.append(inputs[i])
imshow(torchvision.utils.make_grid(errorimg))
###
"""
圖片及處理代碼和很潦草的jupyter文件
鏈接: https://pan.baidu.com/s/19sA7brPbKbv9tPByuY2Rkw 提取碼: t2ih 復(fù)制這段內(nèi)容后打開百度網(wǎng)盤手機App趣斤,操作更方便哦
訓(xùn)練出的模型
鏈接: https://pan.baidu.com/s/1WAevnAAJA8J5JHWaStJ_NQ 提取碼: b9we