
Commit

update code
krishnateja95 committed Dec 9, 2024
1 parent ac62f65 commit 4de249b
Showing 729 changed files with 87,839 additions and 3 deletions.
59 changes: 59 additions & 0 deletions Models/CNN_Models/AlexNet/AlexNet_model.py
@@ -0,0 +1,59 @@
import torch
import torch.nn as nn

__all__ = ["AlexNet"]


class AlexNet(nn.Module):
    def __init__(self, num_classes: int = 3, dropout: float = 0.5):
super().__init__()
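        # Feature extractor: five convolutions with ReLU and three max-pools,
        # following the classic AlexNet layout for 3x224x224 inputs.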
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(64, 192, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(192, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
)
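        # Adaptive pooling fixes the spatial output at 6x6 so the linear
        # classifier below always sees a 256*6*6 feature vector.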
self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
self.classifier = nn.Sequential(
nn.Dropout(p=dropout),
nn.Linear(256 * 6 * 6, 4096),
nn.ReLU(inplace=True),
nn.Dropout(p=dropout),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Linear(4096, num_classes),
)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
        return x


if __name__ == "__main__":
    model = AlexNet()
    x = torch.randn(1, 3, 224, 224)
    output = model(x)
    print(x.size(), output.size())
    assert output.size()[-1] == 3  # default num_classes is 3
    print("Model done")





193 changes: 193 additions & 0 deletions Models/CNN_Models/ConvNext_family/ConvNext_base.py
@@ -0,0 +1,193 @@
from collections.abc import Sequence
from functools import partial
from itertools import repeat
from typing import List

import torch
from torch import nn
from torch.nn import functional as F


__all__ = ["ConvNext_Base"]
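

# Minimal stand-in (an assumption, mirroring torchvision's private
# _make_ntuple helper that this code references but never defines):
# expand a scalar into an n-tuple; pass sequences through as tuples.
def _make_ntuple(x, n):
    if isinstance(x, Sequence):
        return tuple(x)
    return tuple(repeat(x, n))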


def stochastic_depth(input, p, mode, training=True):
if p < 0.0 or p > 1.0:
raise ValueError(f"drop probability has to be between 0 and 1, but got {p}")
if mode not in ["batch", "row"]:
raise ValueError(f"mode has to be either 'batch' or 'row', but got {mode}")
if not training or p == 0.0:
return input

survival_rate = 1.0 - p
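    # "row" mode draws an independent keep/drop decision for each sample
    # (mask shape [N, 1, ..., 1]); "batch" mode makes one decision for the
    # whole batch.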
if mode == "row":
size = [input.shape[0]] + [1] * (input.ndim - 1)
else:
size = [1] * input.ndim
noise = torch.empty(size, dtype=input.dtype, device=input.device)
noise = noise.bernoulli_(survival_rate)
if survival_rate > 0.0:
noise.div_(survival_rate)
return input * noise


# Register stochastic_depth as a leaf for torch.fx symbolic tracing so the
# data-dependent control flow above is not traced into.
torch.fx.wrap("stochastic_depth")


class StochasticDepth(nn.Module):
def __init__(self, p: float, mode: str) -> None:
super().__init__()
self.p = p
self.mode = mode

def forward(self, input):
return stochastic_depth(input, self.p, self.mode, self.training)

def __repr__(self) -> str:
s = f"{self.__class__.__name__}(p={self.p}, mode={self.mode})"
return s


class Permute(torch.nn.Module):
    """Wraps torch.permute in a module so layout changes can sit inside nn.Sequential."""
def __init__(self, dims):
super().__init__()
self.dims = dims

def forward(self, x):
return torch.permute(x, self.dims)


class ConvNormActivation(torch.nn.Sequential):
    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=None,
                 groups=1, norm_layer=torch.nn.BatchNorm2d, activation_layer=torch.nn.ReLU,
                 dilation=1, inplace=True, bias=None, conv_layer=torch.nn.Conv2d):

if padding is None:
if isinstance(kernel_size, int) and isinstance(dilation, int):
padding = (kernel_size - 1) // 2 * dilation
else:
_conv_dim = len(kernel_size) if isinstance(kernel_size, Sequence) else len(dilation)
kernel_size = _make_ntuple(kernel_size, _conv_dim)
dilation = _make_ntuple(dilation, _conv_dim)
padding = tuple((kernel_size[i] - 1) // 2 * dilation[i] for i in range(_conv_dim))
if bias is None:
bias = norm_layer is None

        layers = [conv_layer(in_channels, out_channels, kernel_size, stride, padding,
                             dilation=dilation, groups=groups, bias=bias)]

if norm_layer is not None:
layers.append(norm_layer(out_channels))

if activation_layer is not None:
params = {} if inplace is None else {"inplace": inplace}
layers.append(activation_layer(**params))
super().__init__(*layers)
self.out_channels = out_channels




class LayerNorm2d(nn.LayerNorm):
    """LayerNorm over the channel dim of NCHW tensors: permute to NHWC,
    normalize the last dimension, permute back."""
def forward(self, x):
x = x.permute(0, 2, 3, 1)
x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
x = x.permute(0, 3, 1, 2)
return x


class CNBlock(nn.Module):
def __init__(self, dim, layer_scale, stochastic_depth_prob, norm_layer = None):
super().__init__()
if norm_layer is None:
norm_layer = partial(nn.LayerNorm, eps=1e-6)

        # ConvNeXt block: 7x7 depthwise conv, channels-last LayerNorm, then an
        # inverted-bottleneck MLP (4x expansion, GELU) built from Linear layers
        # operating in NHWC layout.
        self.block = nn.Sequential(
nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim, bias=True),
Permute([0, 2, 3, 1]),
norm_layer(dim),
nn.Linear(in_features=dim, out_features=4 * dim, bias=True),
nn.GELU(),
nn.Linear(in_features=4 * dim, out_features=dim, bias=True),
Permute([0, 3, 1, 2]),
)
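        # Learnable per-channel scale on the residual branch, initialized to a
        # small value; stochastic depth then drops the whole branch per sample.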
self.layer_scale = nn.Parameter(torch.ones(dim, 1, 1) * layer_scale)
self.stochastic_depth = StochasticDepth(stochastic_depth_prob, "row")

def forward(self, input):
result = self.layer_scale * self.block(input)
result = self.stochastic_depth(result)
result += input
return result


class CNBlockConfig:
    def __init__(self, input_channels, out_channels, num_layers):
        self.input_channels = input_channels
        self.out_channels = out_channels
        self.num_layers = num_layers


class ConvNext_Base(nn.Module):
    def __init__(self, stochastic_depth_prob=0.5, layer_scale=1e-6, num_classes=3, block=None, norm_layer=None):
super().__init__()

        # Stage configuration for the Base variant: channel widths
        # (128, 256, 512, 1024) with (3, 3, 27, 3) blocks per stage.
        block_setting = [
CNBlockConfig(128, 256, 3),
CNBlockConfig(256, 512, 3),
CNBlockConfig(512, 1024, 27),
CNBlockConfig(1024, None, 3)]

if block is None:
block = CNBlock

if norm_layer is None:
norm_layer = partial(LayerNorm2d, eps=1e-6)

layers: List[nn.Module] = []

# Stem
firstconv_output_channels = block_setting[0].input_channels
layers.append(ConvNormActivation(3, firstconv_output_channels, kernel_size=4, stride=4, padding=0,
norm_layer=norm_layer, activation_layer=None, bias=True))

total_stage_blocks = sum(cnf.num_layers for cnf in block_setting)
stage_block_id = 0
for cnf in block_setting:
# Bottlenecks
stage: List[nn.Module] = []
for _ in range(cnf.num_layers):
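                # Ramp the drop probability linearly from 0 up to
                # stochastic_depth_prob over the depth of the network.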
sd_prob = stochastic_depth_prob * stage_block_id / (total_stage_blocks - 1.0)
stage.append(block(cnf.input_channels, layer_scale, sd_prob))
stage_block_id += 1
layers.append(nn.Sequential(*stage))
if cnf.out_channels is not None:
# Downsampling
layers.append(nn.Sequential(norm_layer(cnf.input_channels),
nn.Conv2d(cnf.input_channels, cnf.out_channels, kernel_size=2, stride=2)))

self.features = nn.Sequential(*layers)
self.avgpool = nn.AdaptiveAvgPool2d(1)

lastblock = block_setting[-1]
lastconv_output_channels = (lastblock.out_channels if lastblock.out_channels is not None else lastblock.input_channels)
self.classifier = nn.Sequential(norm_layer(lastconv_output_channels), nn.Flatten(1),
nn.Linear(lastconv_output_channels, num_classes))

        # Truncated-normal init (std=0.02) for conv/linear weights and zero
        # biases, as in the ConvNeXt reference code.
        for m in self.modules():
if isinstance(m, (nn.Conv2d, nn.Linear)):
nn.init.trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.zeros_(m.bias)

def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = self.classifier(x)
return x


if __name__ == "__main__":
    model = ConvNext_Base()
    x = torch.randn(1, 3, 224, 224)
    output = model(x)
    print(x.size(), output.size())
    assert output.size()[-1] == 3  # default num_classes is 3
    print("Model done")

