A Complete Classification Program

# Project directory structure
|---blog
    |---DataSet
         |---dataset.py
    |---models
         |---AlexNet.py
         |---DenseNet.py
         |---GooleNet.py
         |---VGG.py
    |---pth
    |---train.py
     

The contents of each file are listed below:

# dataset.py
from PIL import Image
import torch
import numpy as np
import os
from torchvision import transforms


class MyDataSet(torch.utils.data.Dataset):
    def __init__(self, transform=None):  # accept an optional transform
        super(MyDataSet, self).__init__()
        images = []
        # collect every image path together with its label
        image_list = []
        label_list = []
        cwd = 'F:/openvinortest/TEST/'  # dataset root directory
        classes = ['cat', 'dog']  # sub-folder names; a list keeps the label order deterministic
        for index, name in enumerate(classes):
            class_path = cwd + name + '/'
            for img_name in os.listdir(class_path):
                image_list.append(class_path + img_name)
                label_list.append(index)
        print("There are %d images" % (len(image_list)))
        # pair paths with labels
        temp = np.array([image_list, label_list])
        temp = temp.transpose()  # each row is now (path, label)
        # shuffle paths and labels together
        np.random.shuffle(temp)
        image_list = list(temp[:, 0])
        label_list = list(temp[:, 1])
        for i in range(len(image_list)):
            images.append((image_list[i], int(label_list[i])))
        self.imgs = images
        self.transform = transform

    def __getitem__(self, index):
        fn, label = self.imgs[index]
        img = Image.open(fn).convert('RGB')
        if self.transform is None:
            # default transform: resize to 227x227 and convert to a tensor
            self.transform = transforms.Compose([transforms.Resize((227, 227)), transforms.ToTensor()])
        img = self.transform(img)
        return img, label

    def __len__(self):
        return len(self.imgs)


# Build the dataset with the MyDataSet class defined above. Note: this is the dataset itself, not the DataLoader iterator.
if __name__ == "__main__":
    train_data = MyDataSet()
    train_loader = torch.utils.data.DataLoader(dataset=train_data, batch_size=64, shuffle=True)
    for imgs, labels in train_loader:
        print(imgs.shape, labels.shape)
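
The constructor takes an optional transform; when none is given, __getitem__ falls back to a plain resize plus ToTensor. As a sketch (the augmentation choices and the ImageNet normalization statistics below are assumptions of mine, not part of the original project), a training pipeline could be passed in like this:

from torchvision import transforms
from DataSet.dataset import MyDataSet

# hypothetical training transform: random crop and flip plus ImageNet normalization (assumed values)
train_transform = transforms.Compose([
    transforms.Resize((256, 256)),
    transforms.RandomCrop(227),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
train_data = MyDataSet(transform=train_transform)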
# AlexNet.py
import torch.nn as nn


# expects 227x227 input images
class AlexNet(nn.Module):
    def __init__(self):
        super(AlexNet, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=11, stride=4),  # 227 -> 55*55
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),  # 27*27
            nn.Conv2d(64, 192, kernel_size=5, padding=2),  # 27*27
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),  # 13*13
            nn.Conv2d(192, 384, kernel_size=3, padding=1),  # 13*13
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),  # 13*13
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),  # 13*13
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),  # 6*6
        )
        self.classifier = nn.Sequential(
            nn.Linear(6 * 6 * 256, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 2),
        )

    def forward(self, x):
        x = self.features(x)
        x = x.view(-1, 6 * 6 * 256)
        x = self.classifier(x)
        return x
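
A quick sanity check of the 6 * 6 * 256 flatten size, using a random tensor at the 227x227 resolution the dataset produces (a minimal sketch):

import torch
from models.AlexNet import AlexNet

# push one random 227x227 RGB image through the network and inspect the shapes
net = AlexNet()
x = torch.randn(1, 3, 227, 227)
print(net.features(x).shape)  # torch.Size([1, 256, 6, 6])
print(net(x).shape)           # torch.Size([1, 2])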
# DenseNet.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict


class _DenseLayer(nn.Sequential):
    def __init__(self, num_input_features, growth_rate, bn_size, drop_rate):
        super(_DenseLayer, self).__init__()
        self.add_module('norm1', nn.BatchNorm2d(num_input_features))
        self.add_module('relu1', nn.ReLU(inplace=True))
        self.add_module('conv1', nn.Conv2d(num_input_features, bn_size *
                                           growth_rate, kernel_size=1, stride=1, bias=False))
        self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate))
        self.add_module('relu2', nn.ReLU(inplace=True))
        self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate,
                                           kernel_size=3, stride=1, padding=1, bias=False))
        self.drop_rate = drop_rate

    def forward(self, x):
        new_features = super(_DenseLayer, self).forward(x)
        if self.drop_rate > 0:
            new_features = F.dropout(new_features, p=self.drop_rate, training=self.training)
        return torch.cat([x, new_features], 1)


class _DenseBlock(nn.Sequential):
    def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate):
        super(_DenseBlock, self).__init__()
        for i in range(num_layers):
            layer = _DenseLayer(num_input_features + i * growth_rate, growth_rate, bn_size, drop_rate)
            self.add_module('denselayer%d' % (i + 1), layer)


class _Transition(nn.Sequential):
    def __init__(self, num_input_features, num_output_features):
        super(_Transition, self).__init__()
        self.add_module('norm', nn.BatchNorm2d(num_input_features))
        self.add_module('relu', nn.ReLU(inplace=True))
        self.add_module('conv', nn.Conv2d(num_input_features, num_output_features, kernel_size=1, stride=1, bias=False))
        self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))


class DenseNet(nn.Module):
    def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16), num_init_features=64, bn_size=4, drop_rate=0,
                 num_classes=2):

        super(DenseNet, self).__init__()

        # First convolution
        self.features = nn.Sequential(OrderedDict([
            ('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),
            ('norm0', nn.BatchNorm2d(num_init_features)),
            ('relu0', nn.ReLU(inplace=True)),
            ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
        ]))

        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
            self.features.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
                self.features.add_module('transition%d' % (i + 1), trans)
                num_features = num_features // 2

        # Final batch norm
        self.features.add_module('norm5', nn.BatchNorm2d(num_features))

        # Linear layer
        self.classifier = nn.Linear(num_features, num_classes)

        # Official init from torch repo.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight.data)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def forward(self, x):
        features = self.features(x)
        out = F.relu(features, inplace=True)
        out = F.avg_pool2d(out, kernel_size=7, stride=1).view(features.size(0), -1)
        out = self.classifier(out)
        return out


def densenet121(**kwargs):
    model = DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16), **kwargs)
    return model


def densenet169(**kwargs):
    model = DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 32, 32), **kwargs)
    return model


def densenet201(**kwargs):
    model = DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 48, 32), **kwargs)
    return model


def densenet161(**kwargs):
    model = DenseNet(num_init_features=96, growth_rate=48, block_config=(6, 12, 36, 24), **kwargs)
    return model


if __name__ == '__main__':
    # 'DenseNet', 'densenet121', 'densenet169', 'densenet201', 'densenet161'
    # Example
    net = DenseNet()
    print(net)
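
The fixed kernel_size=7 in forward() assumes the final feature map is 7x7, which holds for 224x224 inputs and also for the 227x227 tensors the dataset produces. A minimal forward-pass check:

import torch
from models.DenseNet import densenet121

# one random 227x227 RGB image through DenseNet-121; the final 7x7 feature map is
# averaged away and the classifier emits 2 logits (cat/dog)
net = densenet121()
x = torch.randn(1, 3, 227, 227)
print(net(x).shape)  # torch.Size([1, 2])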
# GooleNet.py
import torch
import torch.nn as nn
import torch.nn.functional as F


# Conv + BN + ReLU building block
class BasicConv2d(nn.Module):
    def __init__(self, in_channels, out_channels, **kwargs):
        super(BasicConv2d, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, **kwargs)
        self.bn = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        return F.relu(x)


# Inception module
class Inception(nn.Module):
    def __init__(self, in_planes,
                 n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes):
        super(Inception, self).__init__()
        # 1x1 conv branch
        self.b1 = BasicConv2d(in_planes, n1x1, kernel_size=1)
        # 1x1 conv -> 3x3 conv branch
        self.b2_1x1_a = BasicConv2d(in_planes, n3x3red, kernel_size=1)
        self.b2_3x3_b = BasicConv2d(n3x3red, n3x3, kernel_size=3, padding=1)
        # 1x1 conv -> 3x3 conv -> 3x3 conv branch
        self.b3_1x1_a = BasicConv2d(in_planes, n5x5red, kernel_size=1)
        self.b3_3x3_b = BasicConv2d(n5x5red, n5x5, kernel_size=3, padding=1)
        self.b3_3x3_c = BasicConv2d(n5x5, n5x5, kernel_size=3, padding=1)
        # 3x3 pool -> 1x1 conv branch
        self.b4_pool = nn.MaxPool2d(3, stride=1, padding=1)
        self.b4_1x1 = BasicConv2d(in_planes, pool_planes, kernel_size=1)

    def forward(self, x):
        y1 = self.b1(x)
        y2 = self.b2_3x3_b(self.b2_1x1_a(x))
        y3 = self.b3_3x3_c(self.b3_3x3_b(self.b3_1x1_a(x)))
        y4 = self.b4_1x1(self.b4_pool(x))
        # each y has shape [batch_size, out_channels, H_out, W_out]
        # concatenate the feature maps of the four branches along the channel dimension
        return torch.cat([y1, y2, y3, y4], 1)


class GoogLeNet(nn.Module):
    def __init__(self):
        super(GoogLeNet, self).__init__()
        self.pre_layers = BasicConv2d(3, 192, kernel_size=3, padding=1)  # single 3x3 stem, keeps the spatial size
        self.a3 = Inception(192, 64, 96, 128, 16, 32, 32)
        self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)
        self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
        self.a4 = Inception(480, 192, 96, 208, 16, 48, 64)
        self.b4 = Inception(512, 160, 112, 224, 24, 64, 64)
        self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)
        self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)
        self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)
        self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)
        self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)
        self.avgpool = nn.AvgPool2d(8, stride=1)  # assumes an 8x8 feature map, i.e. 32x32 inputs
        self.drop = nn.Dropout(0.4)
        self.linear = nn.Linear(1024, 2)

    def forward(self, x):
        out = self.pre_layers(x)
        out = self.a3(out)
        out = self.b3(out)
        out = self.maxpool(out)
        out = self.a4(out)
        out = self.b4(out)
        out = self.c4(out)
        out = self.d4(out)
        out = self.e4(out)
        out = self.maxpool(out)
        out = self.a5(out)
        out = self.b5(out)
        out = self.avgpool(out)
        out = self.drop(out)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
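
Note that this GoogLeNet variant uses a single 3x3 stem and only two stride-2 max pools, so the AvgPool2d(8) at the end expects CIFAR-sized 32x32 inputs; the dataset's 227x227 tensors would not match the 1024-wide Linear layer. A minimal sketch of a compatible forward pass:

import torch
from models.GooleNet import GoogLeNet

# one random 32x32 RGB image: the two stride-2 max pools reduce 32 -> 16 -> 8,
# AvgPool2d(8) collapses the 8x8 map to 1x1, and the 1024-dim features feed the Linear layer
net = GoogLeNet()
x = torch.randn(1, 3, 32, 32)
print(net(x).shape)  # torch.Size([1, 2])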
# VGG.py
import torch.nn as nn


# must subclass torch.nn.Module
class VGG16(nn.Module):
    def __init__(self):
        super(VGG16, self).__init__()
        # convolution and pooling layers: 13 conv layers and 5 max-pooling layers in total
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        # a simplified fully connected head (kept here for reference)
        # self.classifier = nn.Sequential(
        #     nn.Linear(4 * 4 * 512, 1024),
        #     nn.ReLU(),
        #     nn.Dropout(p=0.5),
        #     nn.Linear(1024, 1024),
        #     nn.ReLU(),
        #     nn.Dropout(p=0.5),
        #     nn.Linear(1024, 2)
        # )
        # the standard VGG-16 fully connected head
        self.classes = nn.Sequential(
            nn.Linear(7 * 7 * 512, 4096),
            nn.ReLU(),
            nn.Dropout(p=0.5),
            nn.Linear(4096, 4096),
            nn.ReLU(),
            nn.Dropout(p=0.5),
            nn.Linear(4096, 2)
        )

    # computation performed on each forward pass
    def forward(self, x):
        x = self.features(x)
        x = x.view(-1, 7 * 7 * 512)
        x = self.classes(x)
        return x
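
With the dataset's default 227x227 resize, the five stride-2 poolings give 227 -> 113 -> 56 -> 28 -> 14 -> 7, so the 7 * 7 * 512 flatten size works for 224x224 and 227x227 inputs alike. A quick check (a minimal sketch):

import torch
from models.VGG import VGG16

# 227x227 input: each MaxPool2d(2, 2) floors the spatial size, ending at 7x7x512
net = VGG16()
x = torch.randn(1, 3, 227, 227)
print(net.features(x).shape)  # torch.Size([1, 512, 7, 7])
print(net(x).shape)           # torch.Size([1, 2])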
# train.py
import torch
import torch.nn.functional as F
import torch.nn as nn
from DataSet.dataset import MyDataSet
from models.DenseNet import DenseNet  # alternative backbone, can be swapped in for AlexNet
from models.AlexNet import AlexNet

epochNum = 100
batchSize = 16
# model instance
model = AlexNet()
# if a GPU is available, move all model parameters onto it
if torch.cuda.is_available():
    model = model.cuda()
    print('CUDA is available, using the GPU')
# loss function
loss_fn = nn.CrossEntropyLoss()
# optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)


def train():
    # build the dataset and loader once; shuffle=True reshuffles every epoch
    train_data = MyDataSet()
    train_loader = torch.utils.data.DataLoader(dataset=train_data, batch_size=batchSize, shuffle=True,
                                               drop_last=True)
    for epoch in range(1, epochNum + 1):
        # accumulated loss over the epoch
        running_loss = 0.0
        # number of correct predictions
        running_correct = 0
        for batch, data in enumerate(train_loader, 1):
            x, y = data
            if torch.cuda.is_available():
                x, y = x.cuda(), y.cuda()
            outputs = model(x)
            _, y_pred = torch.max(outputs.detach(), 1)
            # zero the accumulated gradients
            optimizer.zero_grad()
            # compute the loss
            loss = loss_fn(outputs, y)
            # back-propagate
            loss.backward()
            # update all parameters
            optimizer.step()
            running_loss += loss.detach().item()
            running_correct += torch.sum(y_pred == y)
            if batch % 10 == 0:
                print('Batch {}/{},Train Loss:{:.2f},Train Acc:{:.2f}%'.format(
                    batch, len(train_loader), running_loss / batch,
                    100 * running_correct.item() / (batchSize * batch)
                ))
        epoch_loss = running_loss / len(train_loader)
        epoch_acc = 100 * running_correct.item() / (len(train_loader) * batchSize)
        print('Epoch {} Loss:{:.2f} Acc:{:.2f}%'.format(epoch, epoch_loss, epoch_acc))
        torch.save(model.state_dict(), './pth/' + str(epoch) + '.pkl')


# train()


# evaluation
def test():
    dst = MyDataSet()
    data_loader_test = torch.utils.data.DataLoader(dst, batch_size=1, shuffle=False)
    # load the trained weights, then switch to evaluation mode
    model.load_state_dict(torch.load('./pth/2.pkl'))
    model.eval()
    count = 0
    for imgs, label in data_loader_test:
        if torch.cuda.is_available():
            imgs, label = imgs.cuda(), label.cuda()
        outputs = model(imgs)
        # pred is the predicted class index: 0 = cat, 1 = dog
        # probability is the softmax confidence of that prediction
        probability, pred = torch.max(F.softmax(outputs, dim=1).detach(), dim=1)
        if pred == label:
            print("OK")
            count = count + 1
    print('correct: {}/{}'.format(count, len(dst)))


# test()
if __name__ == "__main__":
    train()
    test()
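
Finally, a sketch of single-image inference with the trained weights; the image path and checkpoint name below are placeholders, and the preprocessing simply mirrors the dataset's default resize:

import torch
import torch.nn.functional as F
from PIL import Image
from torchvision import transforms
from models.AlexNet import AlexNet

# load the trained weights (checkpoint name is a placeholder) and classify one image on the CPU
model = AlexNet()
model.load_state_dict(torch.load('./pth/2.pkl', map_location='cpu'))
model.eval()
preprocess = transforms.Compose([transforms.Resize((227, 227)), transforms.ToTensor()])
img = Image.open('some_image.jpg').convert('RGB')  # placeholder image path
x = preprocess(img).unsqueeze(0)                   # add the batch dimension
with torch.no_grad():
    prob = F.softmax(model(x), dim=1)
print('cat: {:.3f}, dog: {:.3f}'.format(prob[0, 0].item(), prob[0, 1].item()))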