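"""Gradio demo: 5-class retinal image grading with a fine-tuned ResNet50.

The class labels (0 "Normal" through 4 "Proliferative") follow the standard
diabetic retinopathy severity scale. A timm ResNet50 has its classifier head
replaced with a 5-way linear layer, fine-tuned weights are loaded from a
local checkpoint, and predictions are served through a Gradio Interface.
"""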
import gradio as gr
import timm
import torch
from torchvision import transforms

# ResNet50 backbone with the classifier head swapped for the 5 severity grades.
model = timm.create_model("resnet50", pretrained=True)
model.fc = torch.nn.Linear(in_features=model.fc.in_features, out_features=5)

# map_location keeps loading working on CPU-only hosts (e.g. a Gradio Space).
path = "epoch_4_Resnet50-0.5contrast.pth"
model.load_state_dict(torch.load(path, map_location="cpu"))
model.eval()

def transform_image(img_sample):
    # Mirror the training-time preprocessing; the checkpoint name suggests the
    # model was trained with 0.5 contrast jitter. Note that ColorJitter samples
    # a random contrast factor per call, so repeated predictions on the same
    # image can vary slightly.
    transform = transforms.Compose([
        transforms.Resize((224, 224)),         # model's expected input size
        transforms.ToTensor(),                 # PIL image -> float tensor in [0, 1]
        transforms.ColorJitter(contrast=0.5),  # contrast jitter, as in training
    ])
    return transform(img_sample)

def predict(image):
    # Preprocess and add a batch dimension: (3, 224, 224) -> (1, 3, 224, 224).
    img = transform_image(image).unsqueeze(0)

    with torch.no_grad():
        grade = torch.softmax(model(img.float()), dim=1)[0]

    categories = ["0 - Normal", "1 - Mild", "2 - Moderate", "3 - Severe", "4 - Proliferative"]
    # gr.Label renders a {label: confidence} dict as a ranked list.
    return {cat: value.item() for cat, value in zip(categories, grade)}
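
# Illustrative return value from predict() (probabilities are made up):
# {"0 - Normal": 0.91, "1 - Mild": 0.05, "2 - Moderate": 0.02,
#  "3 - Severe": 0.01, "4 - Proliferative": 0.01}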

# Wire the model into a Gradio interface: retina image in, ranked grades out.
image = gr.Image(type="pil")
label = gr.Label(label="Level")

demo = gr.Interface(
    fn=predict,
    inputs=image,
    outputs=label,
    examples=["0.png", "2.png", "4.png"],
)

if __name__ == "__main__":
    demo.launch(debug=True)
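
# To run locally, assuming this file is saved as app.py and the checkpoint and
# example PNGs sit alongside it:
#   python app.py
# then open the local URL that Gradio prints to the console.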