GoogLeNet (Inception v1)
```python
import torch
from torch import nn


class Inception(nn.Module):
    """Inception block: four parallel branches whose outputs are
    concatenated along the channel dimension."""
    def __init__(self, inch, n1x1, n3x3_reduce, n3x3, n5x5_reduce, n5x5, pool_proj):
        super().__init__()
        # Branch 1: 1x1 convolution
        self.branch1 = nn.Sequential(
            nn.Conv2d(inch, n1x1, kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(n1x1),
            nn.ReLU(inplace=True)
        )
        # Branch 2: 1x1 reduction followed by a 3x3 convolution
        self.branch2 = nn.Sequential(
            nn.Conv2d(inch, n3x3_reduce, kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(n3x3_reduce),
            nn.ReLU(inplace=True),
            nn.Conv2d(n3x3_reduce, n3x3, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(n3x3),
            nn.ReLU(inplace=True)
        )
        # Branch 3: 1x1 reduction followed by a 5x5 convolution
        self.branch3 = nn.Sequential(
            nn.Conv2d(inch, n5x5_reduce, kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(n5x5_reduce),
            nn.ReLU(inplace=True),
            nn.Conv2d(n5x5_reduce, n5x5, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(n5x5),
            nn.ReLU(inplace=True)
        )
        # Branch 4: 3x3 max pooling followed by a 1x1 projection
        self.branch4 = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
            nn.Conv2d(inch, pool_proj, kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(pool_proj),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        # Every branch preserves the spatial size, so the outputs can be
        # concatenated along the channel dimension (dim=1)
        return torch.cat(
            [
                self.branch1(x),
                self.branch2(x),
                self.branch3(x),
                self.branch4(x)
            ], dim=1
        )
```
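A quick sanity check of the block's shape contract; the input sizes below are arbitrary and chosen only for illustration (they match the `a3` configuration used later):

```python
block = Inception(192, 64, 96, 128, 16, 32, 32)
x = torch.randn(1, 192, 28, 28)   # dummy batch; spatial size is preserved
out = block(x)
print(out.shape)                  # torch.Size([1, 256, 28, 28]): 64+128+32+32 channels
```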
```python
class GoogleNet(nn.Module):
    def __init__(self, num_class=1000):
        super().__init__()
        # Stem: shape comments assume a 224*224 RGB input
        self.prelayer = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3),   # output 112*112*64
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),       # output 56*56*64
            nn.Conv2d(64, 192, kernel_size=3, stride=1, padding=1), # output 56*56*192
            nn.BatchNorm2d(192),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),       # output 28*28*192
        )
        self.a3 = Inception(192, 64, 96, 128, 16, 32, 32)      # output 28*28*256
        self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)    # output 28*28*480
        self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)  # output 14*14*480
        self.a4 = Inception(480, 192, 96, 208, 16, 48, 64)     # output 14*14*512
        self.b4 = Inception(512, 160, 112, 224, 24, 64, 64)    # output 14*14*512
        self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)    # output 14*14*512
        self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)    # output 14*14*528
        self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)  # output 14*14*832
        self.maxpool2 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)  # output 7*7*832
        self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)  # output 7*7*832
        self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)  # output 7*7*1024
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))             # output 1*1*1024
        self.dropout = nn.Dropout(p=0.4)  # 40% dropout before the classifier, as in the paper
        self.linear = nn.Linear(1024, num_class)

    def forward(self, x):
        x = self.prelayer(x)
        x = self.a3(x)
        x = self.b3(x)
        x = self.maxpool1(x)
        x = self.a4(x)
        x = self.b4(x)
        x = self.c4(x)
        x = self.d4(x)
        x = self.e4(x)
        x = self.maxpool2(x)
        x = self.a5(x)
        x = self.b5(x)
        x = self.avgpool(x)
        x = torch.flatten(x, start_dim=1)  # (N, 1024)
        x = self.dropout(x)
        x = self.linear(x)
        return x
```
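A minimal smoke test, assuming the 224*224 input the stem's shape comments are based on (the class count here is arbitrary):

```python
model = GoogleNet(num_class=10)
x = torch.randn(2, 3, 224, 224)   # dummy batch of two RGB images
logits = model(x)
print(logits.shape)               # torch.Size([2, 10])
```

Note that, unlike the original paper, this implementation omits the two auxiliary classifiers GoogLeNet used as extra training signals; only the main head is kept.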
ResNet
```python
import torch
from torch import nn


class ResidualBlock(nn.Module):
    """Basic residual block: two 3x3 convolutions plus an identity (or
    1x1-projection) shortcut."""
    def __init__(self, in_channels, out_channels, stride=1):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)
        # Shortcut connection: identity by default; a 1x1 convolution when the
        # spatial size or channel count changes, so the shapes match for addition
        self.shortcut = nn.Sequential()
        if stride != 1 or in_channels != out_channels:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride),
                nn.BatchNorm2d(out_channels)
            )

    def forward(self, x):
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out += self.shortcut(x)  # add the shortcut before the final ReLU
        out = self.relu(out)
        return out
```
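A quick check that the projection shortcut kicks in when a block downsamples (the sizes here are arbitrary):

```python
block = ResidualBlock(64, 128, stride=2)
x = torch.randn(1, 64, 56, 56)
print(block(x).shape)   # torch.Size([1, 128, 28, 28]); the 1x1 shortcut matched both shapes
```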
```python
class ResNet(nn.Module):
    """ResNet-34 layout: [3, 4, 6, 3] basic blocks per stage."""
    def __init__(self, num_class=1000):
        super().__init__()
        self.in_channels = 64
        # Stem: 7x7 stride-2 convolution followed by a stride-2 max pool
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four stages; each stage after the first halves the spatial size
        self.layer1 = self._make_layer(ResidualBlock, 64, 3, stride=1)
        self.layer2 = self._make_layer(ResidualBlock, 128, 4, stride=2)
        self.layer3 = self._make_layer(ResidualBlock, 256, 6, stride=2)
        self.layer4 = self._make_layer(ResidualBlock, 512, 3, stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512, num_class)

    def _make_layer(self, block, out_channels, num_blocks, stride=1):
        # Only the first block in a stage downsamples; the rest keep stride 1
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_channels, out_channels, stride))
            self.in_channels = out_channels
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        # Return raw logits: nn.CrossEntropyLoss applies log-softmax internally,
        # so a Softmax layer here would be redundant and would hurt training
        x = self.fc(x)
        return x
```
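A minimal usage sketch, again assuming 224*224 inputs (the class count and targets are dummy values for illustration):

```python
model = ResNet(num_class=10)
x = torch.randn(2, 3, 224, 224)
labels = torch.tensor([3, 7])                  # dummy class indices
logits = model(x)
loss = nn.CrossEntropyLoss()(logits, labels)   # works on raw logits directly
print(logits.shape, loss.item())               # torch.Size([2, 10]) and a scalar loss
```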