import torch.nn as nn
import torch.nn.functional as F


class BasicBlock(nn.Module):
    """A standard 3x3 residual block with an identity shortcut.

    Assumes stride == 1 and in_planes == planes, so the shortcut needs no
    projection and is left as an empty (identity) nn.Sequential.
    """

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(
            in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False
        )
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(
            planes, planes, kernel_size=3, stride=1, padding=1, bias=False
        )
        self.bn2 = nn.BatchNorm2d(planes)
        self.shortcut = nn.Sequential()  # identity shortcut

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += self.shortcut(x)  # residual connection
        out = F.relu(out)
        return out


class CustomBlock(nn.Module):
    """Conv -> MaxPool -> BatchNorm -> ReLU, followed by a BasicBlock whose
    output is added back to the pooled features (a second skip connection)."""

    def __init__(self, in_channels, out_channels):
        super(CustomBlock, self).__init__()
        self.inner_layer = nn.Sequential(
            nn.Conv2d(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=3,
                stride=1,
                padding=1,
                bias=False,
            ),
            nn.MaxPool2d(kernel_size=2),  # halves the spatial resolution
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
        )
        self.res_block = BasicBlock(out_channels, out_channels)

    def forward(self, x):
        x = self.inner_layer(x)
        r = self.res_block(x)
        out = x + r  # skip connection around the residual block
        return out


class CustomResNet(nn.Module):
    """ResNet-style classifier for 32x32 inputs (e.g. CIFAR-10).

    prep (3->64) -> CustomBlock (64->128) -> conv block (128->256)
    -> CustomBlock (256->512) -> 4x4 max pool -> linear head,
    returning log-probabilities via log_softmax.
    """

    def __init__(self, num_classes=10):
        super(CustomResNet, self).__init__()
        self.prep_layer = nn.Sequential(
            nn.Conv2d(
                in_channels=3,
                out_channels=64,
                kernel_size=3,
                stride=1,
                padding=1,
                bias=False,
            ),
            nn.BatchNorm2d(64),
            nn.ReLU(),
        )
        self.layer_1 = CustomBlock(in_channels=64, out_channels=128)
        self.layer_2 = nn.Sequential(
            nn.Conv2d(
                in_channels=128,
                out_channels=256,
                kernel_size=3,
                stride=1,
                padding=1,
                bias=False,
            ),
            nn.MaxPool2d(kernel_size=2),
            nn.BatchNorm2d(256),
            nn.ReLU(),
        )
        self.layer_3 = CustomBlock(in_channels=256, out_channels=512)
        # Three 2x2 pools reduce a 32x32 input to 4x4; this pool brings it to 1x1.
        self.max_pool = nn.MaxPool2d(kernel_size=4)
        self.fc = nn.Linear(512, num_classes)

    def forward(self, x):
        x = self.prep_layer(x)
        x = self.layer_1(x)
        x = self.layer_2(x)
        x = self.layer_3(x)
        x = self.max_pool(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, 512)
        x = self.fc(x)
        return F.log_softmax(x, dim=1)
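

# Minimal usage sketch: the final 4x4 pool implies 32x32 inputs (CIFAR-10-sized),
# an assumption read off the architecture rather than stated in this file.
if __name__ == "__main__":
    import torch

    model = CustomResNet(num_classes=10)
    dummy = torch.randn(2, 3, 32, 32)  # random batch: (batch, channels, H, W)
    log_probs = model(dummy)
    print(log_probs.shape)  # torch.Size([2, 10])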