amaye15 committed
Commit a25f677
1 Parent(s): 071dd3c

App - V2 - Improved File Formats & UI

Files changed (4):
  1. .DS_Store         +0  -0
  2. app.py            +81 -65
  3. check.py          +0  -10
  4. requirements.txt  +3  -1

.DS_Store ADDED
Binary file (6.15 kB)
app.py CHANGED
@@ -3,24 +3,25 @@ from gradio_image_prompter import ImagePrompter
 import torch
 import numpy as np
 from sam2.sam2_image_predictor import SAM2ImagePredictor
-from PIL import Image
 from uuid import uuid4
 import os
 from huggingface_hub import upload_folder, login
+from PIL import Image as PILImage
+from datasets import Dataset, Features, Array2D, Image
 import shutil
+import time

 MODEL = "facebook/sam2-hiera-large"
 DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 PREDICTOR = SAM2ImagePredictor.from_pretrained(MODEL, device=DEVICE)

+DESTINATION_DS = "amaye15/object-segmentation"

-login(os.getenv("TOKEN"))
-
-GLOBALS = {}
-
+# login(os.getenv("TOKEN"))

 IMAGE = None
 MASKS = None
+MASKED_IMAGES = None
 INDEX = None

@@ -44,20 +45,20 @@ def prompter(prompts):
         print(f"Predicted Mask {i+1}:", mask.shape)
         red_mask = np.zeros_like(image)
         red_mask[:, :, 0] = mask.astype(np.uint8) * 255  # Apply the red channel
-        red_mask = Image.fromarray(red_mask)
+        red_mask = PILImage.fromarray(red_mask)

         # Convert the original image to a PIL image
-        original_image = Image.fromarray(image)
+        original_image = PILImage.fromarray(image)

         # Blend the original image with the red mask
-        blended_image = Image.blend(original_image, red_mask, alpha=0.5)
+        blended_image = PILImage.blend(original_image, red_mask, alpha=0.5)

         # Add the blended image to the list
         overlay_images.append(blended_image)

-    global IMAGE, MASKS
-
+    global IMAGE, MASKS, MASKED_IMAGES
     IMAGE, MASKS = image, masks
+    MASKED_IMAGES = [np.array(img) for img in overlay_images]

     return overlay_images[0], overlay_images[1], overlay_images[2], masks

@@ -80,82 +81,63 @@ def save_selected_mask(image, mask, output_dir="output"):

     os.makedirs(output_dir, exist_ok=True)

-    # Generate a unique UUID for the folder name
     folder_id = str(uuid4())

-    # Create a path for the new folder
     folder_path = os.path.join(output_dir, folder_id)

-    # Ensure the folder is created
     os.makedirs(folder_path, exist_ok=True)

-    # Define the paths for saving the image and mask
-    image_path = os.path.join(folder_path, "image.npy")
-    mask_path = os.path.join(folder_path, "mask.npy")
-
-    # Save the image and mask to the respective paths
-    with open(image_path, "wb") as f:
-        np.save(f, IMAGE)
-
-    with open(mask_path, "wb") as f:
-        np.save(f, MASKS[INDEX])
-
-    # Upload the folder to the Hugging Face Hub
+    data_path = os.path.join(folder_path, "data.parquet")
+
+    data = {
+        "image": IMAGE,
+        "masked_image": MASKED_IMAGES[INDEX],
+        "mask": MASKS[INDEX],
+    }
+
+    features = Features(
+        {
+            "image": Image(),
+            "masked_image": Image(),
+            "mask": Array2D(
+                dtype="int64", shape=(MASKS[INDEX].shape[0], MASKS[INDEX].shape[1])
+            ),
+        }
+    )
+
+    ds = Dataset.from_list([data], features=features)
+    ds.to_parquet(data_path)
+
     upload_folder(
         folder_path=output_dir,
-        # path_in_repo=path_in_repo,
-        repo_id="amaye15/object-segmentation",
+        repo_id=DESTINATION_DS,
         repo_type="dataset",
-        # ignore_patterns="**/logs/*.txt", # Adjust this if needed
     )

     shutil.rmtree(folder_path)

-    return f"Image and mask saved to {folder_path}."
+    iframe_code = "Success - Check out the 'Results' tab."

+    return iframe_code

-def save_dataset_name(key, dataset_name):
-    global GLOBALS
-    GLOBALS[key] = dataset_name
-
-    iframe_code = f"""
-    <iframe
-        src="https://huggingface.co/datasets/{dataset_name}/embed/viewer/default/train"
-        frameborder="0"
-        width="100%"
-        height="560px"
-    ></iframe>
-    """
-    return f"Huggingface Dataset: {dataset_name}", iframe_code
+    # time.sleep(5)
+
+    # # Add a random query parameter to force reload
+    # random_param = uuid4()
+    # iframe_code = f"""
+    # <iframe
+    #     src="https://huggingface.co/datasets/{DESTINATION_DS}/embed/viewer/default/train"
+    #     frameborder="0"
+    #     width="100%"
+    #     height="560px"
+    # ></iframe>
+    # """


 # Define the Gradio Blocks app
 with gr.Blocks() as demo:
-    with gr.Tab("Setup"):
-        with gr.Row():
-            with gr.Column():
-                source = gr.Textbox(label="Source Dataset")
-                source_display = gr.Markdown()
-                iframe_display = gr.HTML()
-
-                source.change(
-                    save_dataset_name,
-                    inputs=(gr.State("source_dataset"), source),
-                    outputs=(source_display, iframe_display),
-                )

-            with gr.Column():
-
-                destination = gr.Textbox(label="Destination Dataset")
-                destination_display = gr.Markdown()
-
-                destination.change(
-                    save_dataset_name,
-                    inputs=(gr.State("destination_dataset"), destination),
-                    outputs=destination_display,
-                )
-
-    with gr.Tab("Object Mask - Point Prompt"):
+
+    with gr.Tab("Object Segmentation - Point Prompt"):
         gr.Markdown("# Image Point Collector with Multiple Separate Mask Overlays")
         gr.Markdown(
             "Upload an image, click on it, and get each predicted mask overlaid separately in red on individual images."
@@ -185,13 +167,14 @@ with gr.Blocks() as demo:
         # selected_mask_output = gr.Image(show_label=False)

         save_button = gr.Button("Save Selected Mask and Image")
-        save_message = gr.Textbox(visible=False)
+        iframe_display = gr.Markdown()

         # Define the action triggered by the submit button
         submit_button.click(
             fn=prompter,
             inputs=image_input,
             outputs=[image_output_1, image_output_2, image_output_3, gr.State()],
+            show_progress=True,
         )

         # Define the action triggered by mask selection
@@ -205,9 +188,42 @@ with gr.Blocks() as demo:
         save_button.click(
             fn=save_selected_mask,
             inputs=[gr.State(), gr.State()],
-            outputs=save_message,
+            outputs=iframe_display,
             show_progress=True,
         )
+    with gr.Tab("Results"):
+        with gr.Row():
+            gr.HTML(
+                f"""
+                <iframe
+                    src="https://huggingface.co/datasets/{DESTINATION_DS}/embed/viewer/default/train"
+                    frameborder="0"
+                    width="100%"
+                    height="560px"
+                ></iframe>
+                """
+            )
+            # with gr.Column():
+            #     source = gr.Textbox(label="Source Dataset")
+            #     source_display = gr.Markdown()
+            #     iframe_display = gr.HTML()
+
+            #     source.change(
+            #         save_dataset_name,
+            #         inputs=(gr.State("source_dataset"), source),
+            #         outputs=(source_display, iframe_display),
+            #     )
+
+            # with gr.Column():
+
+            #     destination = gr.Textbox(label="Destination Dataset")
+            #     destination_display = gr.Markdown()
+
+            #     destination.change(
+            #         save_dataset_name,
+            #         inputs=(gr.State("destination_dataset"), destination),
+            #         outputs=destination_display,
+            #     )

 # Launch the Gradio app
 demo.launch()
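
For reference, the "Improved File Formats" part of this commit replaces the raw .npy dumps with a single-row Hugging Face datasets table written to parquet. Below is a minimal standalone sketch of that format; the dummy arrays, shapes, and output path are placeholders, not the app's real values.

import numpy as np
from datasets import Array2D, Dataset, Features, Image

# Placeholder arrays standing in for the app's IMAGE, MASKED_IMAGES[INDEX], and MASKS[INDEX].
image = np.zeros((64, 64, 3), dtype=np.uint8)
masked_image = np.zeros((64, 64, 3), dtype=np.uint8)
mask = np.zeros((64, 64), dtype=np.int64)

# Same schema as the new save path: two encoded image columns plus the
# selected mask stored as a 2D int64 array.
features = Features(
    {
        "image": Image(),
        "masked_image": Image(),
        "mask": Array2D(dtype="int64", shape=(mask.shape[0], mask.shape[1])),
    }
)

# One row per saved selection, written to parquet; the app then pushes the
# containing folder to the dataset repo with upload_folder.
ds = Dataset.from_list(
    [{"image": image, "masked_image": masked_image, "mask": mask}],
    features=features,
)
ds.to_parquet("data.parquet")  # placeholder path; the app writes to output/<uuid>/data.parquet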
check.py DELETED
@@ -1,10 +0,0 @@
-import numpy as np
-import matplotlib.pyplot as plt
-
-# Load the image data from the .npy file
-image = np.load("/Users/andrewmayes/Dev/image/image.npy")
-
-# Display the image using matplotlib
-plt.imshow(image)
-plt.axis("off")  # Turn off the axis labels
-plt.show()  # Show the image
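
check.py inspected the old image.npy output, which this commit drops. A minimal sketch of the equivalent check against the new parquet format, assuming a placeholder path to one of the saved data.parquet files:

import matplotlib.pyplot as plt
from datasets import Dataset

# Placeholder path to one of the per-selection parquet files produced by the app.
parquet_path = "output/data.parquet"

ds = Dataset.from_parquet(parquet_path)
row = ds[0]

# The Image() feature decodes the stored bytes back into a PIL image.
plt.imshow(row["masked_image"])
plt.axis("off")  # Turn off the axis labels
plt.show()  # Show the image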
requirements.txt CHANGED
@@ -2,5 +2,7 @@ gradio
 gradio-image-prompter
 huggingface-hub
 Pillow
-opencv-python
 git+https://github.com/facebookresearch/segment-anything-2.git
+pyarrow
+fastparquet
+datasets