add examples and improve SAM2AutomaticMaskGenerator results
Files changed:
- README.md +3 -3
- app.py +18 -0
- utils/models.py +10 -1
README.md
CHANGED
@@ -1,8 +1,8 @@
 ---
 title: Segment Anything 2
-emoji:
-colorFrom:
-colorTo:
+emoji: 🔥
+colorFrom: purple
+colorTo: green
 sdk: gradio
 sdk_version: 4.39.0
 app_file: app.py
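These front-matter fields drive the Space card on Hugging Face: emoji sets the thumbnail icon, colorFrom/colorTo define the card's gradient, and sdk/sdk_version pin the Gradio runtime that serves app_file.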
app.py
CHANGED
@@ -31,6 +31,11 @@ Segment Anything Model 2 (SAM 2) is a foundation model designed to address promptable
 visual segmentation in both images and videos. **Video segmentation will be available
 soon.**
 """
+EXAMPLES = [
+    ["tiny", MASK_GENERATION_MODE, "https://media.roboflow.com/notebooks/examples/dog-2.jpeg", None],
+    ["tiny", MASK_GENERATION_MODE, "https://media.roboflow.com/notebooks/examples/dog-3.jpeg", None],
+    ["tiny", MASK_GENERATION_MODE, "https://media.roboflow.com/notebooks/examples/dog-4.jpeg", None],
+]
 
 DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 MASK_ANNOTATOR = sv.MaskAnnotator(color_lookup=sv.ColorLookup.INDEX)
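Each EXAMPLES row is aligned positionally with the inputs list wired up in the next hunk: checkpoint key, inference mode, input image, and the prompter image (None, since automatic mask generation needs no point or box prompts). Clicking an example is therefore equivalent to a direct call like the sketch below; the argument order follows the inputs list, but the parameter names of process are not shown in this diff, so treat the call shape as an assumption.

```python
# Illustrative only: mirrors the first EXAMPLES row as a direct call.
# `process` and MASK_GENERATION_MODE are defined elsewhere in app.py.
annotated = process(
    "tiny",                 # checkpoint_dropdown_component value
    MASK_GENERATION_MODE,   # mode_dropdown_component value
    "https://media.roboflow.com/notebooks/examples/dog-2.jpeg",  # image_input_component
    None,                   # image_prompter_input_component (unused in this mode)
)
```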
@@ -102,6 +107,19 @@ with gr.Blocks() as demo:
             value='Submit', variant='primary')
         with gr.Column():
             image_output_component = gr.Image(type='pil', label='Image Output')
+    with gr.Row():
+        gr.Examples(
+            fn=process,
+            examples=EXAMPLES,
+            inputs=[
+                checkpoint_dropdown_component,
+                mode_dropdown_component,
+                image_input_component,
+                image_prompter_input_component,
+            ],
+            outputs=[image_output_component],
+            run_on_click=True
+        )
 
 
 def on_mode_dropdown_change(text):
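run_on_click=True makes each example clickable end to end: Gradio fills all four inputs and immediately invokes fn, so visitors see a segmented result without pressing Submit. A minimal self-contained sketch of the same pattern, using a toy function in place of process (the greet function is hypothetical, not from the app):

```python
import gradio as gr


def greet(name: str) -> str:
    # Toy stand-in for app.py's `process` function.
    return f"Hello, {name}!"


with gr.Blocks() as demo:
    name_input = gr.Textbox(label="Name")
    greeting_output = gr.Textbox(label="Greeting")
    gr.Examples(
        fn=greet,
        examples=[["SAM 2"], ["Gradio"]],  # one inner list per example row
        inputs=[name_input],
        outputs=[greeting_output],
        run_on_click=True,  # clicking a row runs `greet` immediately
    )

demo.launch()
```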
utils/models.py
CHANGED
@@ -27,5 +27,14 @@ def load_models(
     for key, (config, checkpoint) in CHECKPOINTS.items():
         model = build_sam2(config, checkpoint, device=device)
         image_predictors[key] = SAM2ImagePredictor(sam_model=model)
-        mask_generators[key] = SAM2AutomaticMaskGenerator(
+        mask_generators[key] = SAM2AutomaticMaskGenerator(
+            model=model,
+            points_per_side=32,
+            points_per_batch=64,
+            pred_iou_thresh=0.7,
+            stability_score_thresh=0.92,
+            stability_score_offset=0.7,
+            crop_n_layers=1,
+            box_nms_thresh=0.7,
+        )
     return image_predictors, mask_generators
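The new keyword arguments pin the generator's sampling and filtering behavior, which is the "improve results" half of this commit: points_per_side=32 prompts SAM 2 with a 32x32 grid of points, points_per_batch=64 controls how many of those points run per forward pass, pred_iou_thresh=0.7 and stability_score_thresh=0.92 (with stability_score_offset=0.7) filter out low-confidence and unstable masks, crop_n_layers=1 reruns generation on image crops so small objects are recovered, and box_nms_thresh=0.7 suppresses duplicate masks. A hedged usage sketch of the tuned generator follows; the image path, device handling, and "tiny" key are illustrative, and load_models taking a device argument is inferred from the hunk header:

```python
import numpy as np
import supervision as sv
import torch
from PIL import Image

from utils.models import load_models

# Build the per-checkpoint predictors and tuned mask generators.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
image_predictors, mask_generators = load_models(device=device)

# generate() expects an RGB numpy array and returns one dict per mask,
# with keys like 'segmentation', 'area', 'predicted_iou', 'stability_score'.
image = np.array(Image.open('dog.jpeg').convert('RGB'))
sam_result = mask_generators['tiny'].generate(image)

# supervision parses the SAM result format directly and overlays the masks.
detections = sv.Detections.from_sam(sam_result=sam_result)
annotated = sv.MaskAnnotator(color_lookup=sv.ColorLookup.INDEX).annotate(
    scene=image.copy(), detections=detections)
```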