import torch
import os
import gradio as gr
from huggingface_hub import hf_hub_download
from PIL import Image

REPO_ID = "owaiskha9654/Yolov7_Custom_Object_Detection" 
FILENAME = "best.pt"
print(os.getcwd())
yolov7_weights = hf_hub_download(repo_id=REPO_ID, filename=FILENAME)
model = torch.hub.load('jinfagang/yolov7', 'custom', path=yolov7_weights, force_reload=False)  # load the custom weights via torch.hub (GitHub repo)
print(yolov7_weights)  # path to the downloaded weights file
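
# Optional tuning (a hedged sketch): models loaded through torch.hub with the
# YOLOv5-style AutoShape wrapper (which the .render()/.imgs usage below relies on)
# typically expose inference thresholds as attributes. These attribute names are
# an assumption for this repo; uncomment only if the loaded model supports them.
# model.conf = 0.25  # minimum confidence to keep a detection
# model.iou = 0.45   # IoU threshold for non-maximum suppression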

def object_detection(im, size=416):
    results = model(im, size=size)  # run inference at the requested image size
    #results.print()  # print results to screen
    #results.show()  # display results
    #results.save()  # save as results1.jpg, results2.jpg... etc.
    results.render()  # updates results.imgs with boxes and labels
    return Image.fromarray(results.imgs[0])
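
# Quick local sanity check (a sketch; assumes one of the sample images listed in
# the examples below is available next to this script):
#   img = Image.open("sample_images/IMG_0125.JPG")
#   object_detection(img).save("prediction.jpg")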

title = "Banana Defect Detection"
description = """This model is a small demonstration based on an analysis of only about 60 images. More reliable and more general results require more examples (images).
"""

# Gradio 3.x component API (gr.inputs / gr.outputs); newer Gradio versions use gr.Image directly
image = gr.inputs.Image(shape=(416, 416), image_mode="RGB", source="upload", label="Image", optional=False)
outputs = gr.outputs.Image(type="pil", label="Output Image")

gr.Interface(
    fn=object_detection,
    inputs=image,
    outputs=outputs,
    title=title,
    description=description,
    examples=[["sample_images/IMG_0125.JPG"], ["sample_images/IMG_0129.JPG"], 
              ["sample_images/IMG_0157.JPG"], ["sample_images/IMG_0158.JPG"]],
).launch()