Spaces (status: Sleeping)

SatyamSinghal committed • Commit d763211 • 1 Parent(s): 890691b
Update app.py

app.py CHANGED
@@ -2,6 +2,7 @@ import gradio as gr
 import cv2
 import torch
 import numpy as np
+from PIL import Image
 
 # Load the YOLOv5 model
 model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)
@@ -18,7 +19,7 @@ def run_inference(image):
     annotated_image = results.render()[0]
     annotated_image = cv2.cvtColor(annotated_image, cv2.COLOR_BGR2RGB)
 
-    return annotated_image
+    return Image.fromarray(annotated_image)
 
 # Function to generate a summary for the detected objects
 def generate_summary(image):
@@ -29,76 +30,95 @@ def generate_summary(image):
         summary += f"- {obj['name']} with confidence {obj['confidence']:.2f}\n"
     return summary
 
+# Function to generate a scene description based on the summary
+def generate_scene_description(summary):
+    if "person" in summary.lower():
+        return "This scene might involve people interacting or a social gathering."
+    elif "car" in summary.lower() or "truck" in summary.lower():
+        return "This could be a street scene or a transportation-related scenario."
+    elif "dog" in summary.lower() or "cat" in summary.lower():
+        return "This appears to involve pets or animals, possibly in a domestic or outdoor setting."
+    else:
+        return "This scene involves various objects. It could be a dynamic or static environment."
+
 # Create the Gradio interface with improved UI
 with gr.Blocks(css="""
     body {
         font-family: 'Poppins', sans-serif;
-
-
+        margin: 0;
+        background: linear-gradient(135deg, #3D52A0, #7091E6, #8697C4, #ADBBDA, #EDE8F5);
+        background-size: 400% 400%;
+        animation: gradient-animation 15s ease infinite;
+        color: #FFFFFF;
+    }
+    @keyframes gradient-animation {
+        0% { background-position: 0% 50%; }
+        50% { background-position: 100% 50%; }
+        100% { background-position: 0% 50%; }
     }
-
-        background-color: #83A0A0;
-        padding: 20px;
+    h1 {
         text-align: center;
-
-
-
+        color: #FFFFFF;
+        font-size: 2.5em;
+        font-weight: bold;
+        margin-bottom: 0.5em;
+        text-shadow: 2px 2px 5px rgba(0, 0, 0, 0.3);
    }
    footer {
-        background-color: #4C5F6B;
-        padding: 10px;
         text-align: center;
-        border-radius: 10px;
-        color: white;
         margin-top: 20px;
-
+        padding: 10px;
+        font-size: 1em;
+        color: #FFFFFF;
+        background: rgba(61, 82, 160, 0.8);
+        border-radius: 8px;
    }
-    .
-
-
-
-
-        font-weight: bold;
+    .gr-button {
+        font-size: 1em;
+        padding: 12px 24px;
+        background-color: #7091E6;
+        color: #FFFFFF;
         border: none;
-
-        transition: all 0.3s;
+        border-radius: 5px;
+        transition: all 0.3s ease-in-out;
    }
-    .
-        background-color: #
-
+    .gr-button:hover {
+        background-color: #8697C4;
+        transform: scale(1.05);
+        box-shadow: 0 5px 15px rgba(0, 0, 0, 0.2);
    }
    .gr-box {
-        background
+        background: rgba(255, 255, 255, 0.1);
+        border: 1px solid rgba(255, 255, 255, 0.3);
         border-radius: 10px;
-        padding:
-        color: #F9B9D2;
+        padding: 15px;
         box-shadow: 0 4px 10px rgba(0, 0, 0, 0.3);
-
-    .gr-input {
-        background-color: #BCA0BC;
-        border-radius: 5px;
-        border: none;
-        padding: 10px;
-        color: #2B3D41;
+        color: #FFFFFF;
    }
 """) as demo:
     with gr.Row():
-        gr.Markdown("<h1
-
+        gr.Markdown("<h1>✨ InsightVision: Detect, Analyze, Summarize ✨</h1>")
+
     with gr.Row():
         with gr.Column(scale=2):
-            image_input = gr.Image(label="Upload Image", type="pil", elem_classes="gr-
-
-            detect_button = gr.Button("Run Detection", elem_classes="btn-primary")
+            image_input = gr.Image(label="Upload Image", type="pil", elem_classes="gr-box")
+            detect_button = gr.Button("Run Detection", elem_classes="gr-button")
         with gr.Column(scale=3):
             annotated_image_output = gr.Image(label="Detected Image", type="pil", elem_classes="gr-box")
             summary_output = gr.Textbox(label="Detection Summary", lines=10, interactive=False, elem_classes="gr-box")
+            scene_description_output = gr.Textbox(label="Scene Description", lines=5, interactive=False, elem_classes="gr-box")
 
     # Actions for buttons
+    def detect_and_process(image):
+        annotated_image = run_inference(image)
+        summary = generate_summary(np.array(image))
+        scene_description = generate_scene_description(summary)
+        return annotated_image, summary, scene_description
+
     detect_button.click(
-        fn=
+        fn=detect_and_process,
         inputs=[image_input],
-        outputs=[annotated_image_output, summary_output]
+        outputs=[annotated_image_output, summary_output, scene_description_output]
    )
 
     gr.Markdown("<footer>Made with ❤️ using Gradio and YOLOv5 | © 2024 InsightVision</footer>")
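
For reference, here is a minimal sketch of how the pieces this commit touches can be exercised outside the Gradio UI. The diff never shows the body of generate_summary(), so summarize() below is an assumed stand-in built on the standard YOLOv5 hub API (results.pandas().xyxy[0], whose rows expose 'name' and 'confidence' columns); summarize(), annotate(), and 'street.jpg' are illustrative names, not part of this repo.

# Sketch only: summarize() approximates generate_summary(), whose body
# the diff does not show; annotate() mirrors the updated run_inference().
import torch
from PIL import Image

model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)

def summarize(image):
    # results.pandas().xyxy[0] is a DataFrame with one row per detection,
    # including 'name' and 'confidence' columns.
    results = model(image)
    detections = results.pandas().xyxy[0].to_dict(orient='records')
    return "\n".join(f"- {obj['name']} with confidence {obj['confidence']:.2f}"
                     for obj in detections)

def annotate(image):
    # results.render() draws the boxes and returns a list of numpy arrays;
    # Image.fromarray() converts the first one back to PIL, as the commit
    # now does at the end of run_inference().
    results = model(image)
    return Image.fromarray(results.render()[0])

image = Image.open('street.jpg')  # placeholder input image
print(summarize(image))           # this text feeds generate_scene_description() in the app
annotate(image).show()

The Image.fromarray() change fits this pattern: results.render() hands back numpy arrays, and converting the first one to a PIL image lines the return value up with the type="pil" setting on the annotated-image output component.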