ditobprasetio committed
Commit fa2a7b2
1 Parent(s): 8ffceae

add application files

Files changed (2)
  1. app.py +75 -0
  2. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,75 @@
+ import torch
+ import torch.nn.functional as F
+ import torchvision.transforms as transforms
+ import numpy as np
+ import gradio as gr
+ from torch import nn
+ from gradio import components
+ from PIL import Image
+
+ class BrainTumorClassifier(nn.Module):
+     def __init__(self, num_classes):
+         super(BrainTumorClassifier, self).__init__()
+         self.features = nn.Sequential(
+             nn.Conv2d(3, 20, kernel_size=3, padding=1),
+             nn.ReLU(),
+             nn.MaxPool2d(2, 2),
+             nn.Conv2d(20, 32, kernel_size=3, padding=1),
+             nn.ReLU(),
+             nn.MaxPool2d(2, 2)
+         )
+         self.classifier = nn.Sequential(
+             nn.Linear(32 * 56 * 56, 128),  # 224x224 input -> 56x56 feature maps after two 2x2 max-pools
+             nn.ReLU(),
+             nn.Linear(128, num_classes)
+         )
+
+     def forward(self, x):
+         x = self.features(x)
+         x = x.view(-1, 32 * 56 * 56)
+         x = self.classifier(x)
+         return x
+
+ def predict(image):
+     image = Image.fromarray(np.uint8(image)).convert('RGB')
+     ## load the trained weights
+     model_path = 'cnn_tumorbrain_classifier_self.pth'
+     model_load = BrainTumorClassifier(4)
+     model_load.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))
+
+     ## put the model in evaluation mode
+     model_load.eval()
+
+     transform_pipeline = transforms.Compose([
+         transforms.Resize((224, 224)),
+         transforms.ToTensor(),
+         transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
+     ])
+
+     ## apply the same transforms used during training
+     input_img = transform_pipeline(image).unsqueeze(0)
+     # input_img has shape [1, 3, 224, 224]
+
+     ## map class index to label
+     class_to_label = {0: 'glioma', 1: 'meningioma', 2: 'notumor', 3: 'pituitary'}
+
+     ## run the model
+     with torch.no_grad():
+         output = model_load(input_img)
+
+     ## softmax turns the logits into per-class probabilities
+     probabilities = F.softmax(output, dim=1)
+
+     ## take the label with the highest probability
+     _, predicted_label = torch.max(probabilities, 1)
+     # conf holds the probability of the predicted class
+     conf, _ = torch.max(probabilities, 1)
+
+     result = "{}, with confidence level of {:.2f}%".format(class_to_label[predicted_label.item()], conf.item() * 100)
+     return result
+
+ iface = gr.Interface(fn=predict,
+                      inputs=gr.Image(),
+                      outputs="textbox")
+
+ iface.launch(share=True)
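
For reference on the 32 * 56 * 56 flattened size used in the classifier head: a 224x224 input (the size produced by transforms.Resize((224, 224))) keeps its spatial size through the padded 3x3 convolutions and is halved by each of the two 2x2 max-pools, giving 56x56 feature maps with 32 channels. A minimal shape check, not part of the commit:

import torch
from torch import nn

# Same feature extractor as in app.py above.
features = nn.Sequential(
    nn.Conv2d(3, 20, kernel_size=3, padding=1),
    nn.ReLU(),
    nn.MaxPool2d(2, 2),
    nn.Conv2d(20, 32, kernel_size=3, padding=1),
    nn.ReLU(),
    nn.MaxPool2d(2, 2)
)

x = torch.randn(1, 3, 224, 224)                  # dummy batch matching Resize((224, 224))
out = features(x)
print(out.shape)                                 # torch.Size([1, 32, 56, 56])
print(out.flatten(1).shape[1] == 32 * 56 * 56)   # True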
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ torch
+ torchvision
+ numpy
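
Note that this commit adds only app.py and requirements.txt; the checkpoint that predict() loads, cnn_tumorbrain_classifier_self.pth, is not among the files changed here, so it presumably arrives in a separate upload. For smoke-testing the interface before the real weights are available, an untrained placeholder in the expected state_dict format could be generated as sketched below (random weights, not the author's training code; its predictions are meaningless):

import torch
from torch import nn

# Architecture copied from app.py so the state_dict keys match what predict() expects.
class BrainTumorClassifier(nn.Module):
    def __init__(self, num_classes):
        super(BrainTumorClassifier, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 20, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(20, 32, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, 2)
        )
        self.classifier = nn.Sequential(
            nn.Linear(32 * 56 * 56, 128),
            nn.ReLU(),
            nn.Linear(128, num_classes)
        )

    def forward(self, x):
        x = self.features(x)
        x = x.view(-1, 32 * 56 * 56)
        x = self.classifier(x)
        return x

model = BrainTumorClassifier(num_classes=4)   # 4 classes: glioma, meningioma, notumor, pituitary
torch.save(model.state_dict(), 'cnn_tumorbrain_classifier_self.pth')

Also note that app.py imports gradio and PIL; requirements.txt presumably relies on the Space's Gradio SDK to provide gradio and on torchvision to pull in Pillow, but a bare local environment would need both installed explicitly.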