# Install detectron2 at runtime if it is not already available.
try:
    import detectron2
except ImportError:
    import os
    os.system('pip install git+https://github.com/facebookresearch/detectron2.git')
import gradio as gr
import numpy as np
import torch

import detectron2
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog
from detectron2.engine import DefaultPredictor
from detectron2.utils.visualizer import ColorMode, Visualizer
# Build a Mask R-CNN config from the COCO model zoo and load the finetuned weights.
model_path = 'model_final.pth'

cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.75  # only keep detections above 75% confidence
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 19          # background placeholder + 18 car-part classes
cfg.MODEL.WEIGHTS = model_path
if not torch.cuda.is_available():
    cfg.MODEL.DEVICE = 'cpu'  # fall back to CPU when no GPU is present

predictor = DefaultPredictor(cfg)
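
# Note: DefaultPredictor loads the weights, puts the model in eval mode,
# applies the config's test-time resizing, and takes one image at a time in
# cfg.INPUT.FORMAT channel order ("BGR" by default), returning a dict with
# an "instances" field.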
# Attach human-readable class names to the dataset entry so the Visualizer
# can label predictions.
my_metadata = MetadataCatalog.get("car_part_merged_dataset_val")
my_metadata.thing_classes = ['_background_',
'back_bumper',
'back_glass',
'back_left_door',
'back_left_light',
'back_right_door',
'back_right_light',
'front_bumper',
'front_glass',
'front_left_door',
'front_left_light',
'front_right_door',
'front_right_light',
'hood',
'left_mirror',
'right_mirror',
'tailgate',
'trunk',
'wheel']
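
# Optional sanity check: the label list above must line up with
# cfg.MODEL.ROI_HEADS.NUM_CLASSES (19, counting the '_background_' slot),
# otherwise the visualizer can mislabel or crash on out-of-range class ids.
assert len(my_metadata.thing_classes) == cfg.MODEL.ROI_HEADS.NUM_CLASSES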
def inference(image):
    # PIL images come in RGB; DefaultPredictor expects BGR (the cfg.INPUT.FORMAT
    # default), so flip the channel order before running the model.
    img = np.array(image)
    outputs = predictor(img[:, :, ::-1])
    v = Visualizer(img,
                   metadata=my_metadata,
                   scale=0.5,
                   instance_mode=ColorMode.SEGMENTATION  # category-consistent colors with high-opacity masks; only meaningful for segmentation models
                   )
    # Predictions must be moved to CPU before drawing.
    out = v.draw_instance_predictions(outputs["instances"].to('cpu'))
    return out.get_image()
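
# Example standalone usage (hypothetical image path, requires Pillow):
#   from PIL import Image
#   out_img = inference(Image.open("car.jpg"))  # RGB numpy array with drawn predictions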
title = "Detectron2 Car Parts Detection"
description = "This demo introduces an interactive playground for our trained Detectron2 model."

gr.Interface(
    fn=inference,
    inputs=gr.Image(type="pil", label="Input"),
    outputs=gr.Image(type="numpy", label="Output"),
    title=title,
    description=description,
).launch()