幾個(gè)卷積神經(jīng)網(wǎng)絡(luò)Backbone網(wǎng)絡(luò)結(jié)構(gòu)

做個(gè)筆記
VGG16

import torch
from torch import nn
import numpy as np


class VGG16(nn.Module):
    """VGG-16 (configuration D) backbone with the standard ImageNet head.

    Input: float tensor of shape (N, 3, H, W); the adaptive average pool
    makes the classifier work for any input size that survives the five
    2x2 max-pools. Output: (N, 1000) class scores.
    """

    # Per-stage channel widths; 'M' marks a 2x2 max-pool between stages.
    _CFG = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M',
            512, 512, 512, 'M', 512, 512, 512, 'M']

    def __init__(self):
        super(VGG16, self).__init__()
        layers = []
        channels = 3
        for item in self._CFG:
            if item == 'M':
                layers.append(nn.MaxPool2d(2, 2))
            else:
                # All convs are 3x3, stride 1, padding 1 (shape-preserving).
                layers.append(nn.Conv2d(channels, item, 3, 1, 1))
                layers.append(nn.ReLU(inplace=True))
                channels = item
        self.features = nn.Sequential(*layers)
        # Force a 7x7 map so the flattened size is always 512 * 7 * 7 = 25088.
        self.avgpool = nn.AdaptiveAvgPool2d(output_size=(7, 7))
        self.classifier = nn.Sequential(
            nn.Linear(25088, 4096, bias=True),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Linear(4096, 4096, bias=True),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Linear(4096, 1000, bias=True),
        )

    def forward(self, x):
        """Run the conv stages, pool to 7x7, flatten, and classify."""
        feats = self.avgpool(self.features(x))
        return self.classifier(torch.flatten(feats, 1))


if __name__ == '__main__':
    # Smoke test: one fake ImageNet-sized image through the network.
    image = torch.tensor(np.random.randn(1, 3, 224, 224), dtype=torch.float32)

    model = VGG16()
    print(model)
    print(model(image).shape)

這么堆疊寫(xiě)是因?yàn)槲蚁肟唇Y(jié)構(gòu)呀,所以沒(méi)有抽出公共代碼。
AlexNet

import torch
from torch import nn
import numpy as np


class AlexNet(nn.Module):
    """AlexNet backbone with the standard ImageNet classification head.

    Input: float tensor of shape (N, 3, 224, 224) (other sizes work as
    long as the conv stack yields a non-empty map — the adaptive pool
    fixes the classifier input at 256 * 6 * 6 = 9216).
    Output: (N, 1000) class scores.
    """

    def __init__(self):
        super().__init__()
        # Conv stack: two pooled stem stages, then three 3x3 convs + pool.
        feature_layers = [
            nn.Conv2d(3, 64, 11, 4, 2), nn.ReLU(inplace=True),
            nn.MaxPool2d(3, 2),
            nn.Conv2d(64, 192, 5, 1, 2), nn.ReLU(inplace=True),
            nn.MaxPool2d(3, 2),
            nn.Conv2d(192, 384, 3, 1, 1), nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, 3, 1, 1), nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, 3, 1, 1), nn.ReLU(inplace=True),
            nn.MaxPool2d(3, 2),
        ]
        self.features = nn.Sequential(*feature_layers)
        self.avgpool = nn.AdaptiveAvgPool2d(output_size=(6, 6))
        # Fully-connected head; dropout precedes each of the first two FCs.
        head_layers = [
            nn.Dropout(0.5),
            nn.Linear(9216, 4096, bias=True), nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Linear(4096, 4096, bias=True), nn.ReLU(inplace=True),
            nn.Linear(4096, 1000, bias=True),
        ]
        self.classifier = nn.Sequential(*head_layers)

    def forward(self, x):
        """Conv features -> 6x6 pool -> flatten -> FC classifier."""
        feats = self.avgpool(self.features(x))
        return self.classifier(torch.flatten(feats, 1))


if __name__ == '__main__':
    # Smoke test: one fake ImageNet-sized image through the network.
    image = torch.tensor(np.random.randn(1, 3, 224, 224), dtype=torch.float32)
    print(AlexNet()(image).shape)

ResNet18

import torch
from torch import nn
import numpy as np


class BasicBlock(nn.Module):
    """ResNet-18/34 basic residual block: two 3x3 convs plus a shortcut,
    with a ReLU applied after the addition.

    Args:
        in_channel: channels of the incoming feature map.
        out_channel: channels produced by both 3x3 convs.
        stride: stride of the first conv (2 halves the spatial size).
        isDownsample: when True, project the shortcut with a stride-2
            1x1 conv + BN so it matches `out` in shape before the add.
    """

    def __init__(self, in_channel, out_channel, stride=1, isDownsample=False):
        super(BasicBlock, self).__init__()
        self.isDownsample = isDownsample
        self.conv1 = nn.Conv2d(in_channel, out_channel, 3, stride, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channel)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_channel, out_channel, 3, 1, 1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channel)

        self.downsample = None
        if isDownsample:
            self.downsample = nn.Sequential(
                nn.Conv2d(in_channel, out_channel, 1, 2, bias=False),
                nn.BatchNorm2d(out_channel)
            )

    def forward(self, x):
        # Shortcut branch: plain identity, or the 1x1 projection when the
        # main branch changes shape.
        identity = self.downsample(x) if self.downsample is not None else x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)

        # BUGFIX: the skip connection must be added on EVERY block (the
        # original only added it when isDownsample was True, so plain
        # blocks had no residual path at all), and the sum must pass
        # through a final ReLU, per He et al., "Deep Residual Learning".
        out += identity
        out = self.relu(out)
        return out


class ResNet(nn.Module):
    """ResNet-18: a 7x7 stem followed by four stages of two BasicBlocks,
    global average pooling, and a 1000-way linear classifier.

    Input: (N, 3, H, W) float tensor; output: (N, 1000) class scores.
    """

    def __init__(self):
        super(ResNet, self).__init__()

        # Stem: 7x7 stride-2 conv + BN + ReLU + 3x3 stride-2 max-pool.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(3, 2, padding=1)

        # Stages 2-4 halve the spatial size and double the channels via
        # their first (downsampling) block.
        self.layer1 = nn.Sequential(BasicBlock(64, 64), BasicBlock(64, 64))
        self.layer2 = nn.Sequential(
            BasicBlock(64, 128, stride=2, isDownsample=True),
            BasicBlock(128, 128),
        )
        self.layer3 = nn.Sequential(
            BasicBlock(128, 256, stride=2, isDownsample=True),
            BasicBlock(256, 256),
        )
        self.layer4 = nn.Sequential(
            BasicBlock(256, 512, stride=2, isDownsample=True),
            BasicBlock(512, 512),
        )
        self.avgpool = nn.AdaptiveAvgPool2d(output_size=(1, 1))
        self.fc = nn.Linear(512, 1000, bias=True)

    def forward(self, x):
        """Stem -> four residual stages -> global pool -> classifier."""
        x = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            x = stage(x)
        x = torch.flatten(self.avgpool(x), 1)
        return self.fc(x)


if __name__ == '__main__':
    # Smoke test: one fake ImageNet-sized image through the network.
    net = ResNet()
    sample = torch.tensor(np.random.randn(1, 3, 224, 224), dtype=torch.float32)
    result = net(sample)
    print(net)
    print(result.shape)

GoogLeNet
output是主干輸出,output_1是第一個(gè)輔助輸出,output_2是第二個(gè)輔助輸出


import torch
from torch import nn
import numpy as np


class BasicConv2d(nn.Module):
    """Conv2d + BatchNorm + ReLU unit used throughout GoogLeNet.

    Args:
        in_channel: input channels.
        out_channel: output channels.
        kernel_size / stride / padding: passed straight to nn.Conv2d
            (bias is off because BN supplies the affine shift).
    """

    def __init__(self, in_channel, out_channel, kernel_size, stride, padding=0):
        super(BasicConv2d, self).__init__()
        self.conv = nn.Conv2d(in_channel, out_channel, kernel_size, stride, padding, bias=False)
        self.bn = nn.BatchNorm2d(out_channel)

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        # BUGFIX: apply the ReLU nonlinearity. Without it every conv unit
        # is purely linear. torchvision's GoogLeNet BasicConv2d applies
        # F.relu(..., inplace=True) here; being functional it does not show
        # up in print(model), which is likely how it was dropped when this
        # structure was copied from the printed module tree.
        return torch.relu(x)


class InceptionAux(nn.Module):
    """Auxiliary classifier head attached to intermediate GoogLeNet stages.

    Pools the feature map to 4x4, reduces channels with a 1x1 conv, then
    classifies with two fully-connected layers.

    Args:
        in_channel: channels of the incoming feature map.
        out_channel: channels after the 1x1 reduction. NOTE(review): fc1
            expects 2048 inputs, i.e. out_channel * 4 * 4 with
            out_channel=128 as used by GoogLeNet — other values break fc1.
    """

    def __init__(self, in_channel, out_channel):
        super(InceptionAux, self).__init__()
        self.avgpool = nn.AdaptiveAvgPool2d(output_size=(4, 4))
        self.conv = BasicConv2d(in_channel, out_channel, 1, 1)
        self.fc1 = nn.Linear(2048, 1024, bias=True)
        self.fc2 = nn.Linear(1024, 1000, bias=True)

    def forward(self, x):
        x = self.avgpool(x)
        x = self.conv(x)
        x = torch.flatten(x, 1)
        # BUGFIX: ReLU between the two linear layers — without it
        # fc2(fc1(x)) collapses into a single affine map. torchvision's
        # InceptionAux applies F.relu here (functional, hence invisible
        # in print(model)).
        x = torch.relu(self.fc1(x))
        x = self.fc2(x)
        return x


class Inception(nn.Module):
    """GoogLeNet inception module: four parallel branches whose outputs
    are concatenated along the channel axis.

    Output channels = out_branch1 + out_branch2_2 + out_branch3_2 +
    out_branch4; the spatial size is unchanged (all branches pad to
    preserve it).
    """

    def __init__(self, in_channel, out_branch1, out_branch2_1, out_branch2_2,
                 out_branch3_1, out_branch3_2, out_branch4):
        super(Inception, self).__init__()
        # Branch 1: plain 1x1 conv.
        self.branch1 = BasicConv2d(in_channel, out_branch1, 1, 1)
        # Branch 2: 1x1 reduction, then a shape-preserving 3x3 conv.
        self.branch2 = nn.Sequential(
            BasicConv2d(in_channel, out_branch2_1, 1, 1),
            BasicConv2d(out_branch2_1, out_branch2_2, 3, 1, 1),
        )
        # Branch 3: same layout as branch 2 with its own widths.
        self.branch3 = nn.Sequential(
            BasicConv2d(in_channel, out_branch3_1, 1, 1),
            BasicConv2d(out_branch3_1, out_branch3_2, 3, 1, 1),
        )
        # Branch 4: 3x3 max-pool (stride 1, padded), then a 1x1 conv.
        self.branch4 = nn.Sequential(
            nn.MaxPool2d(3, 1, padding=1),
            BasicConv2d(in_channel, out_branch4, 1, 1),
        )

    def forward(self, x):
        """Run all four branches on x and concatenate on dim 1."""
        outputs = (self.branch1(x), self.branch2(x),
                   self.branch3(x), self.branch4(x))
        return torch.cat(outputs, dim=1)


class GoogLeNet(nn.Module):
    """GoogLeNet (Inception v1) with two auxiliary classifier heads.

    forward() returns [output, output_1, output_2]: the main classifier
    output plus the two auxiliary outputs (after inception4a and
    inception4d respectively), each softmax-normalized to probabilities.

    NOTE(review): the auxiliary heads run and the softmax is applied even
    in eval mode — unconventional (torchvision returns logits and skips
    aux heads outside training) but preserved here as the caller iterates
    over all three outputs.
    """

    def __init__(self):
        super(GoogLeNet, self).__init__()
        self.conv1 = BasicConv2d(3, 64, 7, 2, 3)
        self.maxpool1 = nn.MaxPool2d(3, 2)
        self.conv2 = BasicConv2d(64, 64, 1, 1)
        self.conv3 = BasicConv2d(64, 192, 3, 1, 1)
        self.maxpool2 = nn.MaxPool2d(3, 2)
        self.inception3a = Inception(in_channel=192, out_branch1=64, out_branch2_1=96, out_branch2_2=128,
                                     out_branch3_1=16, out_branch3_2=32, out_branch4=32)
        self.inception3b = Inception(in_channel=256, out_branch1=128, out_branch2_1=128, out_branch2_2=192,
                                     out_branch3_1=32, out_branch3_2=96, out_branch4=64)
        self.maxpool3 = nn.MaxPool2d(3, 2)
        self.inception4a = Inception(in_channel=480, out_branch1=192, out_branch2_1=96, out_branch2_2=208,
                                     out_branch3_1=16, out_branch3_2=48, out_branch4=64)
        self.inception4b = Inception(in_channel=512, out_branch1=160, out_branch2_1=112, out_branch2_2=224,
                                     out_branch3_1=24, out_branch3_2=64, out_branch4=64)
        self.inception4c = Inception(in_channel=512, out_branch1=128, out_branch2_1=128, out_branch2_2=256,
                                     out_branch3_1=24, out_branch3_2=64, out_branch4=64)
        self.inception4d = Inception(in_channel=512, out_branch1=112, out_branch2_1=144, out_branch2_2=288,
                                     out_branch3_1=32, out_branch3_2=64, out_branch4=64)
        self.inception4e = Inception(in_channel=528, out_branch1=256, out_branch2_1=160, out_branch2_2=320,
                                     out_branch3_1=32, out_branch3_2=128, out_branch4=128)
        self.maxpool4 = nn.MaxPool2d(2, 2)
        self.inception5a = Inception(in_channel=832, out_branch1=256, out_branch2_1=160, out_branch2_2=320,
                                     out_branch3_1=32, out_branch3_2=128, out_branch4=128)
        self.inception5b = Inception(in_channel=832, out_branch1=384, out_branch2_1=192, out_branch2_2=384,
                                     out_branch3_1=48, out_branch3_2=128, out_branch4=128)
        self.aux1 = InceptionAux(512, 128)
        self.aux2 = InceptionAux(528, 128)
        self.avgpool = nn.AdaptiveAvgPool2d(output_size=(1, 1))
        self.dropout = nn.Dropout(0.2, inplace=False)
        self.fc = nn.Linear(1024, 1000, bias=True)

    def forward(self, x):
        # Stem.
        output = self.conv1(x)
        output = self.maxpool1(output)
        output = self.conv2(output)
        output = self.conv3(output)
        output = self.maxpool2(output)
        # Inception stage 3.
        output = self.inception3a(output)
        output = self.inception3b(output)
        output = self.maxpool3(output)
        # Inception stage 4.
        output = self.inception4a(output)

        # First auxiliary head, fed by inception4a's output.
        output_1 = torch.softmax(self.aux1(output), dim=1)

        output = self.inception4b(output)
        output = self.inception4c(output)
        output = self.inception4d(output)

        # Second auxiliary head, fed by inception4d's output.
        output_2 = torch.softmax(self.aux2(output), dim=1)

        output = self.inception4e(output)
        # BUGFIX: maxpool4 was constructed in __init__ but never called in
        # forward; the reference GoogLeNet pools here, between stages 4 and
        # 5 (channel counts — 832 in/out — happen to match either way, so
        # the omission was silent).
        output = self.maxpool4(output)
        output = self.inception5a(output)
        output = self.inception5b(output)
        output = self.avgpool(output)
        output = torch.flatten(output, 1)
        output = self.fc(output)

        output = torch.softmax(output, dim=1)

        return [output, output_1, output_2]


if __name__ == '__main__':
    # Smoke test: run one fake image through the net and print, for each of
    # the three heads, the probability of its top-scoring class.
    image = torch.tensor(np.random.randn(1, 3, 224, 224), dtype=torch.float32)

    model = GoogLeNet()
    outputs = model(image)
    for out in outputs:
        probs = out.detach().numpy()
        # FIX: renamed the local from 'max' to 'top_class' — the original
        # shadowed the builtin max().
        top_class = np.argmax(probs, axis=1)
        print(probs[0, top_class])

最后編輯于
?著作權(quán)歸作者所有,轉(zhuǎn)載或內(nèi)容合作請(qǐng)聯(lián)系作者
  • 序言:七十年代末酌住,一起剝皮案震驚了整個(gè)濱河市,隨后出現(xiàn)的幾起案子阎抒,更是在濱河造成了極大的恐慌酪我,老刑警劉巖,帶你破解...
    沈念sama閱讀 217,657評(píng)論 6 505
  • 序言:濱河連續(xù)發(fā)生了三起死亡事件且叁,死亡現(xiàn)場(chǎng)離奇詭異都哭,居然都是意外死亡,警方通過(guò)查閱死者的電腦和手機(jī)逞带,發(fā)現(xiàn)死者居然都...
    沈念sama閱讀 92,889評(píng)論 3 394
  • 文/潘曉璐 我一進(jìn)店門(mén)欺矫,熙熙樓的掌柜王于貴愁眉苦臉地迎上來(lái),“玉大人展氓,你說(shuō)我怎么就攤上這事穆趴。” “怎么了遇汞?”我有些...
    開(kāi)封第一講書(shū)人閱讀 164,057評(píng)論 0 354
  • 文/不壞的土叔 我叫張陵未妹,是天一觀的道長(zhǎng)。 經(jīng)常有香客問(wèn)我空入,道長(zhǎng)络它,這世上最難降的妖魔是什么? 我笑而不...
    開(kāi)封第一講書(shū)人閱讀 58,509評(píng)論 1 293
  • 正文 為了忘掉前任歪赢,我火速辦了婚禮化戳,結(jié)果婚禮上,老公的妹妹穿的比我還像新娘埋凯。我一直安慰自己点楼,他們只是感情好,可當(dāng)我...
    茶點(diǎn)故事閱讀 67,562評(píng)論 6 392
  • 文/花漫 我一把揭開(kāi)白布递鹉。 她就那樣靜靜地躺著盟步,像睡著了一般。 火紅的嫁衣襯著肌膚如雪躏结。 梳的紋絲不亂的頭發(fā)上却盘,一...
    開(kāi)封第一講書(shū)人閱讀 51,443評(píng)論 1 302
  • 那天,我揣著相機(jī)與錄音媳拴,去河邊找鬼黄橘。 笑死,一個(gè)胖子當(dāng)著我的面吹牛屈溉,可吹牛的內(nèi)容都是我干的塞关。 我是一名探鬼主播,決...
    沈念sama閱讀 40,251評(píng)論 3 418
  • 文/蒼蘭香墨 我猛地睜開(kāi)眼子巾,長(zhǎng)吁一口氣:“原來(lái)是場(chǎng)噩夢(mèng)啊……” “哼帆赢!你這毒婦竟也來(lái)了小压?” 一聲冷哼從身側(cè)響起,我...
    開(kāi)封第一講書(shū)人閱讀 39,129評(píng)論 0 276
  • 序言:老撾萬(wàn)榮一對(duì)情侶失蹤椰于,失蹤者是張志新(化名)和其女友劉穎怠益,沒(méi)想到半個(gè)月后,有當(dāng)?shù)厝嗽跇?shù)林里發(fā)現(xiàn)了一具尸體瘾婿,經(jīng)...
    沈念sama閱讀 45,561評(píng)論 1 314
  • 正文 獨(dú)居荒郊野嶺守林人離奇死亡蜻牢,尸身上長(zhǎng)有42處帶血的膿包…… 初始之章·張勛 以下內(nèi)容為張勛視角 年9月15日...
    茶點(diǎn)故事閱讀 37,779評(píng)論 3 335
  • 正文 我和宋清朗相戀三年,在試婚紗的時(shí)候發(fā)現(xiàn)自己被綠了偏陪。 大學(xué)時(shí)的朋友給我發(fā)了我未婚夫和他白月光在一起吃飯的照片抢呆。...
    茶點(diǎn)故事閱讀 39,902評(píng)論 1 348
  • 序言:一個(gè)原本活蹦亂跳的男人離奇死亡,死狀恐怖笛谦,靈堂內(nèi)的尸體忽然破棺而出抱虐,到底是詐尸還是另有隱情,我是刑警寧澤揪罕,帶...
    沈念sama閱讀 35,621評(píng)論 5 345
  • 正文 年R本政府宣布梯码,位于F島的核電站,受9級(jí)特大地震影響好啰,放射性物質(zhì)發(fā)生泄漏。R本人自食惡果不足惜儿奶,卻給世界環(huán)境...
    茶點(diǎn)故事閱讀 41,220評(píng)論 3 328
  • 文/蒙蒙 一框往、第九天 我趴在偏房一處隱蔽的房頂上張望。 院中可真熱鬧闯捎,春花似錦椰弊、人聲如沸。這莊子的主人今日做“春日...
    開(kāi)封第一講書(shū)人閱讀 31,838評(píng)論 0 22
  • 文/蒼蘭香墨 我抬頭看了看天上的太陽(yáng)。三九已至茬祷,卻和暖如春清焕,著一層夾襖步出監(jiān)牢的瞬間,已是汗流浹背祭犯。 一陣腳步聲響...
    開(kāi)封第一講書(shū)人閱讀 32,971評(píng)論 1 269
  • 我被黑心中介騙來(lái)泰國(guó)打工秸妥, 沒(méi)想到剛下飛機(jī)就差點(diǎn)兒被人妖公主榨干…… 1. 我叫王不留,地道東北人沃粗。 一個(gè)月前我還...
    沈念sama閱讀 48,025評(píng)論 2 370
  • 正文 我出身青樓粥惧,卻偏偏與公主長(zhǎng)得像,于是被迫代替她去往敵國(guó)和親最盅。 傳聞我的和親對(duì)象是個(gè)殘疾皇子突雪,可洞房花燭夜當(dāng)晚...
    茶點(diǎn)故事閱讀 44,843評(píng)論 2 354