First, let's take a look at the ResNet-34 architecture diagram.
(Figure: the detailed ResNet-34 architecture)
In the diagram, convolution layers drawn in the same color share the same kernel configuration. Pay attention to the dashed shortcut connections: there the input and output dimensions differ, so the two cannot be added directly. The code must define a downsample module that convolves the input so its dimensions match the output.
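To make the dashed case concrete, here is a minimal sketch (the shapes are illustrative, borrowed from the layer1-to-layer2 transition): a 1x1 convolution with stride 2, followed by BatchNorm, turns the 64-channel 56x56 identity into a 128-channel 28x28 tensor so that the element-wise addition becomes possible.

import torch
import torch.nn as nn

x = torch.randn(1, 64, 56, 56)  # identity branch entering a dashed block
downsample = nn.Sequential(
    nn.Conv2d(64, 128, kernel_size=1, stride=2, bias=False),
    nn.BatchNorm2d(128),
)
print(downsample(x).shape)  # torch.Size([1, 128, 28, 28])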
The full code:
import torch
import torch.nn as nn

# The basic residual block (the BasicBlock used by ResNet-18/34)
class ResidualBlock(nn.Module):
    def __init__(self, in_channel, out_channel, stride=1, downsample=None):
        super(ResidualBlock, self).__init__()
        # bias=False throughout: each convolution is followed by BatchNorm,
        # which makes a bias term redundant
        self.conv1 = nn.Conv2d(in_channels=in_channel, out_channels=out_channel, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channel)
        self.relu = nn.ReLU()
        self.conv2 = nn.Conv2d(in_channels=out_channel, out_channels=out_channel,
                               kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channel)
        self.downsample = downsample
    def forward(self, x):
        identity = x
        # downsample marks the dashed residual blocks: for those, the dimensions of
        # the input x and of the output F(x) differ, so x must be downsampled
        # before the addition x = F(x) + x is possible.
        if self.downsample is not None:
            identity = self.downsample(x)
        # First convolution
        out = self.conv1(x)
        out = self.bn1(out)   # batch normalization
        out = self.relu(out)  # activation
        # Second convolution
        out = self.conv2(out)
        out = self.bn2(out)
        # Residual connection: add the input to the block's output
        out += identity
        out = self.relu(out)
        return out
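# A quick sanity check of the block above (a sketch; shapes are illustrative).
# The dashed variant maps 1x64x56x56 to 1x128x28x28, with the 1x1 downsample
# convolution reshaping the identity path to match:
#   down = nn.Sequential(nn.Conv2d(64, 128, kernel_size=1, stride=2, bias=False),
#                        nn.BatchNorm2d(128))
#   block = ResidualBlock(64, 128, stride=2, downsample=down)
#   block(torch.randn(1, 64, 56, 56)).shape  # torch.Size([1, 128, 28, 28])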
class ResNet34(nn.Module):
    def __init__(self, num_classes=1000):
        super(ResNet34, self).__init__()
        # Stem: convolve the 3-channel RGB input
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=64, kernel_size=7, stride=2,
                               padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.max_pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # The four stages of residual blocks (3, 4, 6 and 3 blocks for ResNet-34)
        self.layer1 = self.make_layer(in_channel=64, out_channel=64, num_block=3, stride=1)
        self.layer2 = self.make_layer(in_channel=64, out_channel=128, num_block=4, stride=2)
        self.layer3 = self.make_layer(in_channel=128, out_channel=256, num_block=6, stride=2)
        self.layer4 = self.make_layer(in_channel=256, out_channel=512, num_block=3, stride=2)
        # The stages output a 512x7x7 feature map; average pooling reduces it
        # to 512x1x1, i.e. a 512-dimensional vector per image
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # Fully connected layer mapping the 512-dim vector to num_classes (1000) logits
        self.fc = nn.Linear(512, num_classes)
    def make_layer(self, in_channel, out_channel, num_block, stride=1):
        downsample = None
        # A dashed shortcut is needed whenever the stage changes the spatial size or
        # the channel count (in ResNet-34, the stride-2 stages layer2 to layer4)
        if stride != 1 or in_channel != out_channel:
            downsample = nn.Sequential(
                nn.Conv2d(in_channels=in_channel, out_channels=out_channel,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channel)
            )
        layers = []
        # The first residual block of the stage takes downsample, which decides
        # whether the input needs to be transformed
        layers.append(ResidualBlock(in_channel=in_channel, out_channel=out_channel,
                                    downsample=downsample, stride=stride))
        # The remaining blocks of the stage keep input and output dimensions equal
        for i in range(1, num_block):
            layers.append(ResidualBlock(in_channel=out_channel, out_channel=out_channel, stride=1))
        return nn.Sequential(*layers)
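    # For example, make_layer builds layer2 as (a sketch of the resulting structure):
    #   ResidualBlock(64 -> 128, stride=2, with the 1x1 downsample)  # dashed shortcut
    #   ResidualBlock(128 -> 128) x 3                                # solid shortcuts
    # i.e. only the first block of each stage carries the dashed shortcut.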
    # Forward pass
    def forward(self, x):
        x = self.conv1(x)        # 3x224x224 -> 64x112x112
        x = self.bn1(x)
        x = self.relu(x)
        x = self.max_pool(x)     # -> 64x56x56
        x = self.layer1(x)       # -> 64x56x56
        x = self.layer2(x)       # -> 128x28x28
        x = self.layer3(x)       # -> 256x14x14
        x = self.layer4(x)       # -> 512x7x7
        x = self.avgpool(x)      # -> 512x1x1
        x = torch.flatten(x, 1)  # -> 512
        x = self.fc(x)           # -> num_classes
        return x
model = ResNet34()  # instantiate the ResNet-34 model
x = torch.randn(1, 3, 224, 224)  # dummy input: one 3-channel (RGB) 224x224 image
output = model(x)  # run the input through the network
print(output.size())  # print the shape of the result
Output:
D:\python3\anaconda\envs\pytorch\python.exe D:/python3/PythonProgram/smartAbulm/resnet34.py
torch.Size([1, 1000])
Process finished with exit code 0
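As a further sanity check, the hand-written network can be compared against torchvision's reference implementation. The architecture above mirrors torchvision's resnet34, so the total parameter counts should agree (a sketch, assuming torchvision is installed):

from torchvision import models

print(sum(p.numel() for p in ResNet34().parameters()))
print(sum(p.numel() for p in models.resnet34().parameters()))

Both lines should print the same number (roughly 21.8 million parameters).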