joselobenitezg committed
Commit 7a3883a • 1 Parent(s): afe246e
wip
Files changed:
- app.py (+2 -3)
- config.py (+13 -13)
- load_and_test.ipynb (+0 -0)
- utils/vis_utils.py (+41 -1)
app.py CHANGED

@@ -1,4 +1,4 @@
-# Part of the
+# Part of the code is from: fashn-ai/sapiens-body-part-segmentation
 import os
 
 import gradio as gr
@@ -54,12 +54,11 @@ def segment(image: Image.Image, model_name: str) -> Image.Image:
     return blended_image
 
 
-# ----------------- GRADIO UI ----------------- #
 def update_model_choices(task):
     model_choices = list(SAPIENS_LITE_MODELS_PATH[task.lower()].keys())
     return gr.Dropdown(choices=model_choices, value=model_choices[0] if model_choices else None)
 
-with gr.Blocks(
+with gr.Blocks() as demo:
     gr.Markdown("# Sapiens Arena 🤸🏽♂️ - WIP devmode- Not yet available")
     with gr.Tabs():
         with gr.TabItem('Image'):
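For orientation, here is a minimal sketch of how the update_model_choices callback would typically be wired into the gr.Blocks layout. The commit only shows the callback and the `with gr.Blocks() as demo:` line; the task selector and dropdown component names below are illustrative assumptions, not taken from this diff:

import gradio as gr

from config import SAPIENS_LITE_MODELS_PATH

def update_model_choices(task):
    model_choices = list(SAPIENS_LITE_MODELS_PATH[task.lower()].keys())
    return gr.Dropdown(choices=model_choices, value=model_choices[0] if model_choices else None)

with gr.Blocks() as demo:
    # Hypothetical components; the names are not from this commit.
    task_radio = gr.Radio(choices=["Seg", "Depth", "Normal", "Pose"], value="Seg", label="Task")
    model_dropdown = gr.Dropdown(label="Model")
    # Repopulate the model dropdown whenever the selected task changes.
    task_radio.change(update_model_choices, inputs=task_radio, outputs=model_dropdown)

demo.launch()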
config.py CHANGED

@@ -25,26 +25,26 @@ SAPIENS_LITE_MODELS_URL = {
 
 SAPIENS_LITE_MODELS_PATH = {
     "depth": {
-        "sapiens_0.3b": "checkpoints/
-        "sapiens_0.6b": "checkpoints/
-        "sapiens_1b": "checkpoints/
-        "sapiens_2b": "checkpoints/
+        "sapiens_0.3b": "checkpoints/depth/sapiens_0.3b_torchscript.pt2",
+        "sapiens_0.6b": "checkpoints/depth/sapiens_0.6b_torchscript.pt2",
+        "sapiens_1b": "checkpoints/depth/sapiens_1b_torchscript.pt2",
+        "sapiens_2b": "checkpoints/depth/sapiens_2b_torchscript.pt2"
     },
     "detector": {},
     "normal": {
-        "sapiens_0.3b": "checkpoints/
-        "sapiens_0.6b": "checkpoints/
-        "sapiens_1b": "checkpoints/
-        "sapiens_2b": "checkpoints/
+        "sapiens_0.3b": "checkpoints/normal/sapiens_0.3b_torchscript.pt2",
+        "sapiens_0.6b": "checkpoints/normal/sapiens_0.6b_torchscript.pt2",
+        "sapiens_1b": "checkpoints/normal/sapiens_1b_torchscript.pt2",
+        "sapiens_2b": "checkpoints/normal/sapiens_2b_torchscript.pt2"
     },
     "pose": {
-        "sapiens_1b": "checkpoints/
+        "sapiens_1b": "checkpoints/pose/sapiens_1b_torchscript.pt2"
     },
     "seg": {
-        "sapiens_0.3b": "checkpoints/
-        "sapiens_0.6b": "checkpoints/
-        "sapiens_1b": "checkpoints/
-        "sapiens_2b": "checkpoints/
+        "sapiens_0.3b": "checkpoints/seg/sapiens_0.3b_torchscript.pt2",
+        "sapiens_0.6b": "checkpoints/seg/sapiens_0.6b_torchscript.pt2",
+        "sapiens_1b": "checkpoints/seg/sapiens_1b_torchscript.pt2",
+        "sapiens_2b": "checkpoints/seg/sapiens_2b_torchscript.pt2"
     }
 }
 
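The new entries point at files named *_torchscript.pt2, which suggests TorchScript exports. A minimal loading sketch under that assumption (loading code is not part of this commit):

import torch

from config import SAPIENS_LITE_MODELS_PATH

# Assumption: each .pt2 checkpoint is a TorchScript archive, as the
# "_torchscript" suffix suggests, and loads with torch.jit.load.
checkpoint_path = SAPIENS_LITE_MODELS_PATH["seg"]["sapiens_0.3b"]
device = "cuda" if torch.cuda.is_available() else "cpu"
model = torch.jit.load(checkpoint_path, map_location=device)
model.eval()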
load_and_test.ipynb ADDED

The diff for this file is too large to render. See raw diff.
utils/vis_utils.py CHANGED

@@ -39,4 +39,44 @@ def visualize_mask_with_overlay(img: Image.Image, mask: Image.Image, labels_to_i
 
     blended = Image.fromarray(np.uint8(img_np * (1 - alpha) + overlay * alpha))
 
-    return blended
+    return blended
+
+def resize_image(pil_image, target_size):
+    """
+    Resize a PIL image while maintaining its aspect ratio.
+
+    Args:
+        pil_image (PIL.Image): The input image.
+        target_size (tuple): The target size as (width, height).
+
+    Returns:
+        PIL.Image: The resized image.
+    """
+    original_width, original_height = pil_image.size
+    target_width, target_height = target_size
+
+    # Calculate aspect ratios
+    aspect_ratio = original_width / original_height
+    target_aspect = target_width / target_height
+
+    if aspect_ratio > target_aspect:
+        # Image is wider than target, scale based on width
+        new_width = target_width
+        new_height = int(new_width / aspect_ratio)
+    else:
+        # Image is taller than target, scale based on height
+        new_height = target_height
+        new_width = int(new_height * aspect_ratio)
+
+    # Resize the image
+    resized_image = pil_image.resize((new_width, new_height), Image.LANCZOS)
+
+    # Create a new image with the target size and paste the resized image
+    new_image = Image.new('RGB', target_size, (0, 0, 0))
+    paste_x = (target_width - new_width) // 2
+    paste_y = (target_height - new_height) // 2
+    new_image.paste(resized_image, (paste_x, paste_y))
+
+    return new_image
+
+
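A quick usage sketch of the new resize_image helper: it letterboxes the input onto a black canvas of the requested size instead of distorting the aspect ratio. The file names here are illustrative:

from PIL import Image

from utils.vis_utils import resize_image

# Illustrative input path, not from the commit.
img = Image.open("person.jpg")

# A wider-than-target image is scaled to the full target width and
# centered vertically, leaving black bars above and below.
padded = resize_image(img, (1024, 1024))
padded.save("person_padded.jpg")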