delete nms add description
app.py CHANGED
@@ -22,7 +22,8 @@ predictor = Predictor(
 )
 
 
-def image_inference(image, confthre, nmsthre):
+def image_inference(image, confthre):
+    nmsthre = 0.01
     cv2.cvtColor(image, cv2.COLOR_RGB2BGR, image)
     outputs, img_info = predictor.inference(image, confthre, nmsthre)
     result_image = predictor.visual(outputs[0], img_info)
@@ -34,18 +35,18 @@ image_interface = gr.Interface(
     fn=image_inference,
     inputs=[
         "image",
-        gr.Slider(0.01, 1, value=0.4, step=0.01, label="Confidence Threshold", ),
-        gr.Slider(0.01, 1, value=0.01, step=0.01, label="NMS Threshold")
+        gr.Slider(0.01, 1, value=0.4, step=0.01, label="Confidence Threshold", )
     ],
-    examples=[["assets/sample.png", 0.4, 0.01],
-              ["assets/sample5.jpg", 0.4, 0.01],
-              ["assets/sample7.jpg", 0.4, 0.01]],
+    examples=[["assets/sample.png", 0.4], ["assets/sample4.jpg", 0.4],
+              ["assets/sample5.jpg", 0.4], ["assets/sample6.jpg", 0.4],
+              ["assets/sample7.jpg", 0.4], ["assets/sample8.jpg", 0.4]],
     outputs=gr.Image(type="pil"),
-    title="OpenLenda image demo"
+    title="OpenLenda image demo",
+    description="You can upload your own image. Use the slider to adjust the confidence threshold. Example images are good for looking at the ability of the model.<br>自由に画像をアップロードできます。スライダーで信頼度の閾値を調整できます。サンプル画像はモデルの能力を見るのにおすすめの画像達です。",
 )
 
 
-def video_inference(video_file, confthre, nmsthre, start_sec, duration):
+def video_inference(video_file, confthre, start_sec, duration):
     start_timestamp = time.strftime("%H:%M:%S", time.gmtime(start_sec))
     end_timestamp = time.strftime("%H:%M:%S", time.gmtime(start_sec + duration))
 
@@ -74,6 +75,7 @@ def video_inference(video_file, confthre, nmsthre, start_sec, duration):
         except Exception as e:
             print(e)
             continue
+        nmsthre = 0.01
         outputs, img_info = predictor.inference(frame, confthre, nmsthre)
         result_frame = predictor.visual(outputs[0], img_info)
         out.write(result_frame)
@@ -94,12 +96,12 @@ video_interface = gr.Interface(
     inputs=[
         gr.Video(),
         gr.Slider(0.01, 1, value=0.5, step=0.01, label="Confidence Threshold", ),
-        gr.Slider(0.01, 1, value=0.01, step=0.01, label="NMS Threshold"),
         gr.Slider(0, 60, value=0, step=1, label="Start Second"),
         gr.Slider(1, 10, value=3, step=1, label="Duration"),
     ],
     outputs=gr.Video(),
-    title="OpenLenda video demo"
+    title="OpenLenda video demo",
+    description="You can upload your own video. Use the slider to adjust the confidence threshold, start second, and duration.<br>自由に動画をアップロードできます。スライダーで信頼度の閾値、開始秒数、動画の長さを調整できます。",
 )
 
 if __name__ == "__main__":
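
The constraint behind this commit is that gr.Interface passes its inputs components to fn positionally, so dropping the "NMS Threshold" slider from inputs also means dropping the matching nmsthre parameter from the callback (and the third value from each examples row) and fixing the value inside the function instead. Below is a minimal, self-contained sketch of that pattern, not the Space's actual app.py: fake_detect is a hypothetical stand-in for the predictor.inference / predictor.visual calls defined earlier in the file and not shown in this diff.

import gradio as gr


def fake_detect(image, confthre, nmsthre):
    # Hypothetical stand-in for the real predictor, which runs detection at the
    # given confidence/NMS thresholds and draws boxes on the image.
    return image


def image_inference(image, confthre):
    # After this commit the NMS threshold is no longer a UI input; it is fixed
    # here and still forwarded to the detector.
    nmsthre = 0.01
    return fake_detect(image, confthre, nmsthre)


demo = gr.Interface(
    fn=image_inference,
    inputs=[
        "image",                                 # -> first parameter: image
        gr.Slider(0.01, 1, value=0.4, step=0.01,
                  label="Confidence Threshold"), # -> second parameter: confthre
    ],
    outputs=gr.Image(type="pil"),
    title="OpenLenda image demo",
    description="Upload an image and adjust the confidence threshold.",
)

if __name__ == "__main__":
    demo.launch()

The video path in the diff follows the same pattern: the slider is removed from the video interface and nmsthre = 0.01 is set inside the frame loop before each predictor.inference call.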