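# Gradio sketch-recognition demo: a small CNN classifies 28x28 grayscale drawings
# into the classes listed in classes.txt.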
import torch
from torch import nn
import torchvision.transforms as transforms
import torch.nn.functional as F
from pathlib import Path
import gradio as gr
from PIL import Image
import numpy as np
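
# One class name per line; the line order must match the model's output indices.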
LABELS = Path('classes.txt').read_text().splitlines()
num_classes = len(LABELS)
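
# Simple CNN: three conv/pool blocks followed by a two-layer classifier head.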
model = nn.Sequential(
    nn.Conv2d(1, 64, 3, padding='same'),
    nn.ReLU(),
    nn.MaxPool2d(2),
    nn.Conv2d(64, 128, 3, padding='same'),
    nn.ReLU(),
    nn.MaxPool2d(2),
    nn.Conv2d(128, 256, 3, padding='same'),
    nn.ReLU(),
    nn.MaxPool2d(2),
    nn.Flatten(),
    nn.Linear(2304, 512),  # 256 channels x 3 x 3 spatial positions = 2304 features
    nn.ReLU(),
    nn.Linear(512, num_classes),
)
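
# Load the trained weights on CPU and switch the model to inference mode.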
state_dict = torch.load('model.pth', map_location='cpu')
model.load_state_dict(state_dict, strict=False)
model.eval()
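
# Preprocessing: resize to 28x28, convert to a tensor, and scale pixels to [-1, 1].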
transform = transforms.Compose([
transforms.Resize((28, 28)),
transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))
])
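
# Predict the top-5 classes for a sketch and return them as a {label: probability} dict.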
def predict(image):
    # The Sketchpad component returns a dict; 'composite' holds the rendered drawing.
    image = image['composite']
    image = Image.fromarray(image).convert('L')  # convert to a grayscale PIL image
    input_tensor = transform(image).unsqueeze(0)  # add a batch dimension
    with torch.no_grad():
        out = model(input_tensor)
    print(out.shape)
    probabilities = F.softmax(out[0], dim=0)
    values, indices = torch.topk(probabilities, 5)
    print(values, indices)
    return {LABELS[i]: v.item() for i, v in zip(indices, values)}
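
# Sketchpad input, label output; live=True re-runs prediction as the user draws.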
interface = gr.Interface(predict, inputs='sketchpad', outputs='label', live=True)
interface.launch(debug=True)