Spaces:
Running
Running
sandrocalzada
committed on
Commit
•
5990ce9
1
Parent(s):
1dce026
Update app.py
Browse files
app.py
CHANGED
@@ -18,12 +18,60 @@ def predict_gender(image):
|
|
18 |
# Assuming binary classification with a single output neuron
|
19 |
return "Male" if prediction[0] < 0.5 else "Female"
|
20 |
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
18 |
# Assuming binary classification with a single output neuron
|
19 |
return "Male" if prediction[0] < 0.5 else "Female"
|
20 |
|
21 |
+
|
22 |
+
def predict(video_in, image_in_video, image_in_img):
    """Return the supplied image if any image input is set, else the video.

    Args:
        video_in: filepath of an uploaded/recorded video, or None.
        image_in_video: filepath of a webcam-captured image, or None.
        image_in_img: filepath of an uploaded image file, or None.

    Returns:
        The image filepath when an image was provided (webcam takes
        precedence), otherwise the video filepath.

    Raises:
        gr.Error: if neither a video nor an image was provided.
    """
    # Fix: compare against None with `is`, not `==` (PEP 8) — avoids
    # invoking custom __eq__ on component values.
    if video_in is None and image_in_video is None and image_in_img is None:
        raise gr.Error("Please upload a video or image.")
    if image_in_video or image_in_img:
        print("image", image_in_video, image_in_img)
        image = image_in_video or image_in_img
        return image
    return video_in
|
31 |
+
|
32 |
+
|
33 |
+
def toggle(choice):
    """Swap visibility between the webcam image input and the file input.

    Returns a pair of component updates (webcam_input, file_input); the
    chosen source becomes visible and both values are cleared.
    """
    shown = gr.update(visible=True, value=None)
    hidden = gr.update(visible=False, value=None)
    return (shown, hidden) if choice == "webcam" else (hidden, shown)
|
38 |
+
|
39 |
+
|
40 |
+
# Gradio UI: two tabs (Video / Image), each letting the user choose
# between webcam capture and file upload, then running `predict`.
with gr.Blocks() as blocks:
    gr.Markdown("### Video or Image? WebCam or Upload?")

    with gr.Tab("Video") as tab:
        with gr.Row():
            with gr.Column():
                video_or_file_opt = gr.Radio(["webcam", "upload"], value="webcam",
                                             label="How would you like to upload your video?")
                video_in = gr.Video(source="webcam", include_audio=False)
                # Switching the radio retargets the video component's source
                # and clears any previously captured value.
                video_or_file_opt.change(fn=lambda s: gr.update(source=s, value=None),
                                         inputs=video_or_file_opt,
                                         outputs=video_in, queue=False, show_progress=False)
            with gr.Column():
                video_out = gr.Video()
                run_btn = gr.Button("Run")
        # BUG FIX: `predict` takes three positional arguments; wiring only
        # `video_in` raised a TypeError on click. Adapt with a lambda that
        # supplies None for both image inputs.
        run_btn.click(fn=lambda v: predict(v, None, None),
                      inputs=[video_in], outputs=[video_out])
        gr.Examples(fn=lambda v: predict(v, None, None), examples=[],
                    inputs=[video_in], outputs=[video_out])

    with gr.Tab("Image"):
        with gr.Row():
            with gr.Column():
                image_or_file_opt = gr.Radio(["webcam", "file"], value="webcam",
                                             label="How would you like to upload your image?")
                image_in_video = gr.Image(source="webcam", type="filepath")
                image_in_img = gr.Image(
                    source="upload", visible=False, type="filepath")

                # `toggle` shows whichever image source was selected and
                # hides (and clears) the other.
                image_or_file_opt.change(fn=toggle, inputs=[image_or_file_opt],
                                         outputs=[image_in_video, image_in_img],
                                         queue=False, show_progress=False)
            with gr.Column():
                image_out = gr.Image()
                run_btn = gr.Button("Run")
        # BUG FIX: the two image components were passed positionally into
        # `predict(video_in, image_in_video, image_in_img)`, landing in the
        # wrong parameters and leaving one required argument missing. The
        # lambda maps them onto the correct slots (video_in=None).
        run_btn.click(fn=lambda img_file, img_cam: predict(None, img_cam, img_file),
                      inputs=[image_in_img, image_in_video], outputs=[image_out])
        gr.Examples(fn=lambda img_file, img_cam: predict(None, img_cam, img_file),
                    examples=[],
                    inputs=[image_in_img, image_in_video], outputs=[image_out])

blocks.queue()
blocks.launch()
|