sagar007 committed on
Commit
d25c63d
1 Parent(s): 30d5c98

Create app.py

Files changed (1)
  1. app.py +52 -0
app.py ADDED
@@ -0,0 +1,52 @@
+ import gradio as gr
+ import torch
+ from PIL import Image
+ import numpy as np
+ from sam2.sam2_image_predictor import SAM2ImagePredictor
+
+ # Load the SAM2 predictor directly from the Hugging Face Hub
+ predictor = SAM2ImagePredictor.from_pretrained("facebook/sam2-hiera-large")
+
+ def segment_image(input_image, x, y):
+     # Convert the Gradio numpy image to a PIL Image
+     input_image = Image.fromarray(input_image.astype('uint8'), 'RGB')
+
+     # Compute the image embedding for the predictor
+     predictor.set_image(input_image)
+
+     # Prepare the prompt: a single point at (x, y)
+     input_point = np.array([[x, y]])
+     input_label = np.array([1])  # 1 marks a foreground point
+
+     # Generate candidate masks and keep the highest-scoring one
+     with torch.inference_mode():
+         masks, scores, _ = predictor.predict(point_coords=input_point, point_labels=input_label)
+     mask = masks[np.argmax(scores)]
+
+     # Convert the binary mask to a grayscale PIL image
+     mask_image = Image.fromarray((mask * 255).astype(np.uint8))
+
+     # Apply the mask to the original image (masked-out pixels become black)
+     result = Image.composite(input_image, Image.new('RGB', input_image.size, 'black'), mask_image)
+
+     return result
+
+ # Create the Gradio interface
+ iface = gr.Interface(
+     fn=segment_image,
+     inputs=[
+         gr.Image(type="numpy"),
+         gr.Slider(0, 1000, label="X coordinate"),
+         gr.Slider(0, 1000, label="Y coordinate")
+     ],
+     outputs=gr.Image(type="pil"),
+     title="SAM2 Image Segmentation",
+     description="Upload an image and use the X and Y sliders to place a foreground point. SAM2 segments the object at that point."
+ )
+
+ # Launch the app
+ if __name__ == "__main__":
+     iface.launch()
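
As a quick local sanity check, one could exercise segment_image once on a synthetic image before launching the UI. This is only a sketch, not part of the commit: it assumes app.py is importable from the working directory, that the sam2 package and model weights download cleanly, and the output filename is purely illustrative.

# Hypothetical local smoke test (illustrative only): run segment_image once
# on a synthetic image before launching the Gradio UI.
import numpy as np
from app import segment_image  # importing app.py also loads the SAM2 weights

img = np.zeros((256, 256, 3), dtype=np.uint8)
img[96:160, 96:160] = 200           # bright square standing in for an object

out = segment_image(img, 128, 128)  # foreground point at the square's centre
out.save("segmentation_check.png")  # hypothetical output filename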