anilbhatt1 committed
Commit
b447fcc
1 Parent(s): 2e177b5

initial commit

Files changed (5)
  1. app.py +66 -0
  2. cat.jpg +0 -0
  3. dog.jpg +0 -0
  4. model.py +70 -0
  5. requirements.txt +7 -0
app.py ADDED
@@ -0,0 +1,66 @@
+ import torch, torchvision
+ from torchvision import transforms
+ import numpy as np
+ import gradio as gr
+ from PIL import Image
+ import os
+ import lightning as L
+ import torchmetrics
+ from pytorch_grad_cam import GradCAM
+ from pytorch_grad_cam.utils.image import show_cam_on_image
+ from model import LightningModel
+
+ # Build a ResNet18 backbone with a 10-class head and load the trained Lightning checkpoint on CPU
+ pytorch_model = torch.hub.load('pytorch/vision', 'resnet18', weights=None)
+ pytorch_model.fc = torch.nn.Linear(512, 10)
+ model_pth = './epoch=22-step=16169.ckpt'
+ lightning_model = LightningModel.load_from_checkpoint(checkpoint_path=model_pth, model=pytorch_model, map_location=torch.device("cpu"))
+
+ # Undo the (0.50, 0.23) normalization so the tensor can be mapped back to image space
+ inv_normalize = transforms.Normalize(
+     mean=[-0.50/0.23, -0.50/0.23, -0.50/0.23],
+     std=[1/0.23, 1/0.23, 1/0.23]
+ )
+ classes = ('plane', 'car', 'bird', 'cat', 'deer',
+            'dog', 'frog', 'horse', 'ship', 'truck')
+
+ def inference(input_img, transparency=0.5, target_layer_number=-1):
+     transform = transforms.ToTensor()
+     org_img = input_img
+     input_img = transform(input_img).unsqueeze(0)
+     lightning_model.eval()
+     with torch.no_grad():
+         outputs = lightning_model(input_img)
+     print(f'outputs, {outputs.shape}')
+     softmax = torch.nn.Softmax(dim=0)
+     o = softmax(outputs.flatten())
+     confidences = {classes[i]: float(o[i]) for i in range(10)}
+     _, prediction = torch.max(outputs, 1)
+     # GradCAM over the selected block of layer2 (gradients are needed, so this stays outside no_grad)
+     target_layers = [pytorch_model.layer2[target_layer_number]]
+     cam = GradCAM(model=lightning_model, target_layers=target_layers, use_cuda=False)
+     grayscale_cam = cam(input_tensor=input_img, targets=None)
+     grayscale_cam = grayscale_cam[0, :]
+     # De-normalize the input tensor back to an RGB array
+     img = inv_normalize(input_img.squeeze(0))
+     rgb_img = np.transpose(img.numpy(), (1, 2, 0))
+     visualization = show_cam_on_image(org_img/255, grayscale_cam, use_rgb=True, image_weight=transparency)
+     print(confidences)
+     return confidences, visualization
+
+ title = "CIFAR10 trained on ResNet18 Model with GradCAM"
+ description = "A simple Gradio interface to infer on a ResNet18 model and get GradCAM results"
+ example1 = './cat.jpg'
+ example2 = './dog.jpg'
+ examples = [[example1, 0.5, -1], [example2, 0.5, -1]]
+ gradio_app = gr.Interface(
+     inference,
+     inputs=[gr.Image(shape=(32, 32), label="Input Image"),
+             gr.Slider(0, 1, value=0.5, label="Opacity of GradCAM"),
+             gr.Slider(-2, -1, value=-2, step=1, label="Which Layer?")],
+     outputs=[gr.Label(num_top_classes=3), gr.Image(shape=(32, 32), label="Output").style(width=128, height=128)],
+     title=title,
+     description=description,
+     examples=examples,
+ )
+ gradio_app.launch()
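
A quick way to sanity-check inference() without launching the UI is to call it directly on one of the bundled example images. This is a minimal sketch, not part of the commit, assuming the definitions in app.py (everything above gradio_app.launch()) are already in scope and that the image is resized to 32x32 by hand, which the gr.Image component would otherwise do:

# Hypothetical smoke test -- assumes app.py's definitions (up to, but not
# including, gradio_app.launch()) have been executed in this session.
import numpy as np
from PIL import Image

# Resize manually to 32x32; in the app, gr.Image(shape=(32, 32)) handles this.
img = np.array(Image.open('./cat.jpg').resize((32, 32)))

confidences, visualization = inference(img, transparency=0.5, target_layer_number=-1)
print(sorted(confidences.items(), key=lambda kv: kv[1], reverse=True)[:3])  # top-3 classes
print(visualization.shape)  # (32, 32, 3) overlay returned by show_cam_on_image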
cat.jpg ADDED
dog.jpg ADDED
model.py ADDED
@@ -0,0 +1,70 @@
+ import torch
+ import torch.nn.functional as F
+ import lightning as L
+ import torchmetrics
+
+
+ class LightningModel(L.LightningModule):
+     def __init__(self, model, learning_rate, cosine_t_max, mode):
+         super().__init__()
+
+         self.learning_rate = learning_rate
+         self.cosine_t_max = cosine_t_max
+         self.model = model
+         self.example_input_array = torch.Tensor(1, 3, 32, 32)
+         self.mode = mode
+
+         self.save_hyperparameters(ignore=["model"])
+
+         self.train_acc = torchmetrics.Accuracy(task="multiclass", num_classes=10)
+         self.val_acc = torchmetrics.Accuracy(task="multiclass", num_classes=10)
+         self.test_acc = torchmetrics.Accuracy(task="multiclass", num_classes=10)
+
+     def forward(self, x):
+         return self.model(x)
+
+     def _shared_step(self, batch):
+         features, true_labels = batch
+         logits = self(features)
+
+         loss = F.cross_entropy(logits, true_labels)
+         predicted_labels = torch.argmax(logits, dim=1)
+         return loss, true_labels, predicted_labels
+
+     def training_step(self, batch, batch_idx):
+         loss, true_labels, predicted_labels = self._shared_step(batch)
+
+         self.log("train_loss", loss)
+         self.train_acc(predicted_labels, true_labels)
+         self.log(
+             "train_acc", self.train_acc, prog_bar=True, on_epoch=True, on_step=False
+         )
+         return loss
+
+     def validation_step(self, batch, batch_idx):
+         loss, true_labels, predicted_labels = self._shared_step(batch)
+
+         self.log("val_loss", loss, prog_bar=True)
+         self.val_acc(predicted_labels, true_labels)
+         self.log("val_acc", self.val_acc, prog_bar=True)
+
+     def test_step(self, batch, batch_idx):
+         loss, true_labels, predicted_labels = self._shared_step(batch)
+         self.test_acc(predicted_labels, true_labels)
+         self.log("test_acc", self.test_acc)
+
+     def configure_optimizers(self):
+         opt = torch.optim.SGD(self.parameters(), lr=self.learning_rate)
+         if self.mode == 'lrfind':
+             # LR-finder runs use the bare optimizer without a scheduler
+             return opt
+         else:
+             sch = torch.optim.lr_scheduler.CosineAnnealingLR(opt, T_max=self.cosine_t_max)
+
+             return {
+                 "optimizer": opt,
+                 "lr_scheduler": {
+                     "scheduler": sch,
+                     "monitor": "train_loss",
+                     "interval": "step",  # step means "batch" here, default: epoch
+                     "frequency": 1,  # default
+                 },
+             }
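
The training script that produced epoch=22-step=16169.ckpt is not included in this commit. The sketch below shows one plausible way to fit LightningModel on CIFAR-10 so that app.py has a checkpoint to load; the transforms mirror the (0.50, 0.23) normalization undone in app.py, while the batch size, learning rate, epoch count, and checkpoint name are assumptions:

# Hypothetical training sketch -- dataloaders and hyperparameters are illustrative only.
import torch
import lightning as L
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
from model import LightningModel

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.50, 0.50, 0.50), (0.23, 0.23, 0.23)),
])
train_ds = datasets.CIFAR10('./data', train=True, download=True, transform=transform)
train_loader = DataLoader(train_ds, batch_size=64, shuffle=True, num_workers=2)

backbone = torch.hub.load('pytorch/vision', 'resnet18', weights=None)
backbone.fc = torch.nn.Linear(512, 10)

max_epochs = 24
lit_model = LightningModel(model=backbone, learning_rate=0.05,
                           cosine_t_max=max_epochs * len(train_loader),  # scheduler steps per batch
                           mode='train')

trainer = L.Trainer(max_epochs=max_epochs, accelerator='auto')
trainer.fit(lit_model, train_dataloaders=train_loader)
trainer.save_checkpoint('resnet18_cifar10.ckpt')  # app.py expects a checkpoint like this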
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ torch
+ gradio
+ grad-cam
+ lightning
+ torchvision
+ pillow
+ numpy