ZiyuG committed
Commit: 762c88c
Parent(s): 028b124

Update app.py

Files changed (1)
  1. app.py +390 -58
app.py CHANGED
@@ -1,61 +1,393 @@
 
 import gradio as gr
-from huggingface_hub import hf_hub_download, HfFolder
-from PIL import Image
-import requests, torch
 import numpy as np
-from io import BytesIO
 import plotly.graph_objects as go
-import os
-
-def load_ScanNet_sample(data_path):
-
-    all_data = torch.load(data_path)
-
-    point = np.array(all_data['coord'])
-    color = np.array(all_data['color'])
-
-    point = point - point.min(axis=0)
-    point = point / point.max(axis=0)
-    color = color / 255.
-    return point, color
-
-def show_logo():
-    repo_id = "ZiyuG/Cache"
-    filename = "scene0000_00.pth"
-    token = os.getenv('HF_TOKEN')
-    print("token:", token)
-
-    try:
-        file_path = hf_hub_download(repo_id=repo_id, filename=filename, use_auth_token=token, repo_type='dataset')
-        point, color = load_ScanNet_sample(file_path)
-        if point.shape[0] > 100000:
-            indices = np.random.choice(point.shape[0], 100000, replace=False)
-            point = point[indices]
-            color = color[indices]
-    except Exception as e:
-        print(e)
-        point = np.random.rand(8000, 3)
-        color = np.random.rand(8000, 3)
-
-    fig = go.Figure(
-        data=[
-            go.Scatter3d(
-                x=point[:,0], y=point[:,1], z=point[:,2],
-                mode='markers',
-                marker=dict(size=1, color=color, opacity=0.5),
-            )
-        ],
-        layout=dict(
-            scene=dict(
-                xaxis=dict(visible=False),
-                yaxis=dict(visible=False),
-                zaxis=dict(visible=False),
-                aspectratio=dict(x=1, y=1, z=1),
-                camera=dict(eye=dict(x=1.5, y=1.5, z=1.5))
-            )
-        )
-    )
-    return fig
-
-iface = gr.Interface(fn=show_logo, inputs=[], outputs=gr.Plot(), title="Display Logo")
-iface.launch(share=True)
+from pickle import FALSE  # NOTE: unused import (likely an accidental editor auto-import)
 import gradio as gr
 import numpy as np
 import plotly.graph_objects as go
+from sam2point import dataset
+import sam2point.configs as configs
+from demo_utils import run_demo, create_box
+
+# Sample data for dropdowns
+samples = {
+    "3D Indoor Scene - S3DIS": ["Conference Room", "Restroom", "Lobby", "Office1", "Office2"],
+    # "3D Indoor Scene - ScanNet": ["Scene1", "Scene2", "Scene3", "Scene4", "Scene5"],
+    "3D Indoor Scene - ScanNet": ["Scene1", "Scene2", "Scene3", "Scene4", "Scene5", "Scene6"],
+    "3D Outdoor Driving Scene - KITTI": ["Scene1", "Scene2", "Scene3", "Scene4", "Scene5", "Scene6"],
+    "3D Outdoor Street Scene - Semantic3D": ["Scene1", "Scene2", "Scene3", "Scene4", "Scene5", "Scene6", "Scene7"],
+    "3D Object - Objaverse": ["Plant", "Lego", "Lock", "Elephant", "Knife Rest", "Skateboard", "Popcorn Machine", "Stove", "Bus Shelter", "Thor Hammer", "Horse"],
+    # "3D Object - Objaverse": ["Plant", "Elephant", "Knife Rest", "Skateboard", "Popcorn Machine", "Stove", "Bus Shelter", "Thor Hammer", "Horse", "Dinner Booth"],
+}
+
+# File paths for each sample above, indexed per dataset
+PATH = {
+    "S3DIS": ['Area_1_conferenceRoom_1.txt', 'Area_2_WC_1.txt', 'Area_4_lobby_2.txt', 'Area_5_office_3.txt', 'Area_6_office_9.txt'],
+    # "ScanNet": ['scene0001_01.pth', 'scene0005_01.pth', 'scene0010_01.pth', 'scene0016_02.pth', 'scene0019_01.pth'],
+    "ScanNet": ['scene0005_01.pth', 'scene0010_01.pth', 'scene0016_02.pth', 'scene0019_01.pth', 'scene0000_00.pth', 'scene0002_00.pth'],
+    "Objaverse": ["plant.npy", "human.npy", "lock.npy", "elephant.npy", "knife_rest.npy", "skateboard.npy", "popcorn_machine.npy", "stove.npy", "bus_shelter.npy", "thor_hammer.npy", "horse.npy"],
+    # "Objaverse": ["plant.npy", "elephant.npy", "knife_rest.npy", "skateboard.npy", "popcorn_machine.npy", "stove.npy", "bus_shelter.npy", "thor_hammer.npy", "horse.npy", "dinner_booth.npy"],
+    "KITTI": ["scene1.npy", "scene2.npy", "scene3.npy", "scene4.npy", "scene5.npy", "scene6.npy"],
+    "Semantic3D": ["scene1.npy", "scene2.npy", "patch19.npy", "patch0.npy", "patch1.npy", "patch50.npy", "patch62.npy"]
+}
+
+prompt_types = ["Point", "Box", "Mask"]
+
+# def select(name, sample_idx):
+#     DATASET = name.split('-')[1].replace(" ", "")
+#     gr.Info(f"Visualizing {DATASET} Example {str(sample_idx)}...")
+
+# Function to load and display a 3D scene or object
+def load_3d_scene(name, sample_idx=-1, type_=None, prompt=None, final=False, new_color=None):
+    DATASET = name.split('-')[1].replace(" ", "")
+    path = 'data/' + DATASET + '/' + PATH[DATASET][sample_idx]
+    asp, SIZE = 1., 1
+    # load data
+    print(path)
+    if DATASET == 'S3DIS':
+        point, color = dataset.load_S3DIS_sample(path, sample=True)
+        alpha = 1
+    elif DATASET == 'ScanNet':
+        point, color = dataset.load_ScanNet_sample(path)
+        alpha = 1
+    elif DATASET == 'Objaverse':
+        point, color = dataset.load_Objaverse_sample(path)
+        alpha = 1
+        SIZE = 2
+    elif DATASET == 'KITTI':
+        point, color = dataset.load_KITTI_sample(path)
+        asp = 0.3
+        alpha = 0.7
+    elif DATASET == 'Semantic3D':
+        point, color = dataset.load_Semantic3D_sample(path, sample_idx, sample=True)
+        alpha = 0.2
+    print("Loading Dataset:", DATASET, ", Point Cloud Size:", point.shape)
+
+    ##### Initial Showing #####
+    if not type_:
+        if point.shape[0] > 100000:
+            indices = np.random.choice(point.shape[0], 100000, replace=False)
+            point = point[indices]
+            color = color[indices]
+        # #NOTE KITTI
+        # mask1 = point[:, 1] <= 0.8
+        # mask4 = point[:, 1] >= 0.6
+        # mask2 = point[:, 0] >= 0.3
+        # mask3 = point[:, 0] <= 0.7
+        # mask = mask1 & mask2 & mask3 & mask4
+        # point = point[mask]
+        # color = color[mask]
+        # alpha = 1
+        # ######
+        fig = go.Figure(
+            data=[
+                go.Scatter3d(
+                    x=point[:,0], y=point[:,1], z=point[:,2],
+                    mode='markers',
+                    marker=dict(size=SIZE, color=color, opacity=alpha),
+                    name=""
+                )
+            ],
+            layout=dict(
+                scene=dict(
+                    xaxis=dict(visible=False),
+                    yaxis=dict(visible=False),
+                    zaxis=dict(visible=False),
+                    aspectratio=dict(x=1, y=1, z=asp),
+                    camera=dict(eye=dict(x=1.5, y=1.5, z=1.5))
+                )
+            )
+        )
+        return fig
+
+    ##### Final result: recolor the cloud and add a legend entry for the segmentation
+    if final:
+        color = new_color
+        # tiny invisible marker used only to add a "Segmentation Results" legend entry
+        green = np.array([[0.1, 0.1, 0.1]])
+        add_green = go.Scatter3d(
+            x=green[:,0], y=green[:,1], z=green[:,2],
+            mode='markers',
+            marker=dict(size=0.0001, color='green', opacity=1),
+            name="Segmentation Results"
+        )
+    if type_ == "box":
+        if point.shape[0] > 100000:
+            indices = np.random.choice(point.shape[0], 100000, replace=False)
+            point = point[indices]
+            color = color[indices]
+        # mask = point[:, 1] < 0.8
+        # point = point[mask]
+        # color = color[mask]
+        # alpha = 1
+        scatter = go.Scatter3d(
+            x=point[:,0], y=point[:,1], z=point[:,2],
+            mode='markers',
+            marker=dict(size=SIZE, color=color, opacity=alpha),
+            name="3D Object/Scene"
+        )
+        if final: scatter = [scatter, add_green] + create_box(prompt)
+        else: scatter = [scatter] + create_box(prompt)
+
+    elif type_ == "point":
+        prompt = np.array([prompt])
+        new = go.Scatter3d(
+            x=prompt[:,0], y=prompt[:,1], z=prompt[:,2],
+            mode='markers',
+            # marker=dict(size=5, color='red', opacity=1),
+            # marker=dict(size=5, color='rgb(255, 140, 0)', opacity=1),
+            marker=dict(size=5, color='rgb(139, 0, 0)', opacity=1),
+            name="Point Prompt"
+        )
+        # print(point.shape, color.shape, new_color.shape)
+        if point.shape[0] > 100000:
+            indices = np.random.choice(point.shape[0], 100000, replace=False)
+            point = point[indices]
+            color = color[indices]
+        # #NOTE KITTI
+        # mask1 = point[:, 1] <= 0.8
+        # mask = point[:, 1] >= 0.35 #2
+        # < 0.63 #3
+        # mask2 = point[:, 0] >= 0.3
+        # mask3 = point[:, 0] <= 0.7
+        # mask = mask1 & mask2 & mask3 & mask4
+        # #NOTE S3DIS
+        # if DATASET == 'S3DIS':
+        #     mask = point[:, 0] > 0.04
+        # point = point[mask]
+        # color = color[mask]
+        # alpha = 1
+        # ######
+        scatter = go.Scatter3d(
+            x=point[:,0], y=point[:,1], z=point[:,2],
+            mode='markers',
+            marker=dict(size=SIZE, color=color, opacity=alpha),
+            name="3D Object/Scene"
+        )
+        if final: scatter = [scatter, new, add_green]
+        else: scatter = [scatter, new]
+    elif type_ == 'mask' and not final:
+        color = np.clip(prompt * 255, 0, 255).astype(np.uint8)
+        if point.shape[0] > 100000:
+            indices = np.random.choice(point.shape[0], 100000, replace=False)
+            point = point[indices]
+            color = color[indices]
+        scatter = go.Scatter3d(
+            x=point[:,0], y=point[:,1], z=point[:,2],
+            mode='markers',
+            marker=dict(size=SIZE, color=color, opacity=alpha),
+            name="3D Object/Scene"
+        )
+        # tiny invisible marker used only to add a "Mask Prompt" legend entry
+        red = np.array([[0.1, 0.1, 0.1]])
+        add_red = go.Scatter3d(
+            x=red[:,0], y=red[:,1], z=red[:,2],
+            mode='markers',
+            marker=dict(size=0.0001, color='red', opacity=1),
+            name="Mask Prompt"
+        )
+        scatter = [scatter, add_red]
+    elif type_ == 'mask' and final:
+        if point.shape[0] > 100000:
+            indices = np.random.choice(point.shape[0], 100000, replace=False)
+            point = point[indices]
+            color = color[indices]
+        # # cut
+        # mask = point[:, 0] > 0.1
+        # point = point[mask]
+        # color = color[mask]
+        # alpha = 1
+        # ######
+        scatter = go.Scatter3d(
+            x=point[:,0], y=point[:,1], z=point[:,2],
+            mode='markers',
+            marker=dict(size=SIZE, color=color, opacity=alpha),
+            name="3D Object/Scene"
+        )
+        scatter = [scatter, add_green]
+        print(point.shape, color.shape)
+    else:
+        print("Wrong Prompt Type")
+        exit(1)
+
+    fig = go.Figure(
+        data=scatter,
+        layout=dict(
+            scene=dict(
+                xaxis=dict(visible=False),
+                yaxis=dict(visible=False),
+                zaxis=dict(visible=False),
+                aspectratio=dict(x=1, y=1, z=asp),
+                camera=dict(eye=dict(x=1.5, y=1.5, z=1.5))
+            )
+        )
+    )
+    return fig
+
+
+
+# Function to display the selected prompt in 3D
+def show_prompt_in_3d(name, sample_idx, prompt_type, prompt_idx):
+    DATASET = name.split('-')[1].replace(" ", "")
+    TYPE = prompt_type.lower()
+    # substring check: True for 'S3DIS' and 'ScanNet', False for the other datasets
+    theta = 0. if DATASET in "S3DIS ScanNet" else 0.5
+    mode = "bilinear" if DATASET in "S3DIS ScanNet" else 'nearest'
+
+    prompt = run_demo(DATASET, TYPE, sample_idx, prompt_idx, 0.02, theta, mode, ret_prompt=True)
+    fig = load_3d_scene(name, sample_idx, TYPE, prompt)
+    return fig
+
+
+# Function to start segmentation
+def start_segmentation(name=None, sample_idx=None, prompt_type=None, prompt_idx=None, vx=0.02):
+    if name is None or sample_idx is None or prompt_type is None or prompt_idx is None:
+        return gr.Plot(), gr.Textbox(label="Response", value="Please ensure all options are selected.", visible=True)
+
+    DATASET = name.split('-')[1].replace(" ", "")
+    TYPE = prompt_type.lower()
+    theta = 0. if DATASET in "S3DIS ScanNet" else 0.5
+    mode = "bilinear" if DATASET in "S3DIS ScanNet" else 'nearest'
+
+    new_color, prompt = run_demo(DATASET, TYPE, sample_idx, prompt_idx, vx, theta, mode, ret_prompt=False)
+    fig = load_3d_scene(name, sample_idx, TYPE, prompt, final=True, new_color=new_color)
+    return fig, gr.Textbox(label="Response", value="Segmentation completed successfully!", visible=True)
+
+
+def update1(datasets):
+    # Refresh the scene/object selector when the dataset type changes
+    if 'Objaverse' in datasets:
+        return gr.Radio(label="Select 3D Object", choices=samples[datasets]), gr.Textbox(label="Response", value="", visible=True)  #, gr.Slider(minimum=0.01, maximum=0.15, step=0.001, label="Voxel Size", value=0.02)
+    return gr.Radio(label="Select 3D Scene", choices=samples[datasets]), gr.Textbox(label="Response", value="", visible=True)  #, gr.Slider(minimum=0.01, maximum=0.15, step=0.001, label="Voxel Size", value=0.02)
+
+
+def update2(name, sample_idx, prompt_type):
+    # Refresh the prompt-example selector once dataset, scene, and prompt type are all chosen
+    if name is None or sample_idx is None or prompt_type is None:
+        return gr.Radio(label="Select Prompt Example", choices=[]), gr.Textbox(label="Response", value="", visible=True)  #, gr.Slider(minimum=0.01, maximum=0.15, step=0.001, label="Voxel Size", value=0.02)
+    DATASET = name.split('-')[1].replace(" ", "")
+    TYPE = prompt_type.lower() + '_prompts'
+    # if DATASET in "ScanNet" and prompt_type == 'Mask': TYPE = 'point_prompts'
+    if DATASET == 'S3DIS':
+        info = configs.S3DIS_samples[sample_idx][TYPE]
+    elif DATASET == 'ScanNet':
+        info = configs.ScanNet_samples[sample_idx][TYPE]
+    elif DATASET == 'Objaverse':
+        info = configs.Objaverse_samples[sample_idx][TYPE]
+    elif DATASET == 'KITTI':
+        info = configs.KITTI_samples[sample_idx][TYPE]
+    elif DATASET == 'Semantic3D':
+        info = configs.Semantic3D_samples[sample_idx][TYPE]
+
+    cur = ['Example ' + str(i) for i in range(1, len(info) + 1)]
+    return gr.Radio(label="Select Prompt Example", choices=cur), gr.Textbox(label="Response", value="", visible=True)  #, gr.Slider(minimum=0.01, maximum=0.15, step=0.001, label="Voxel Size", value=0.02)
+
+
+def update3(name, sample_idx, prompt_type, prompt_idx):
+    # Look up the per-example voxel size (currently not wired up: the voxel-size slider is commented out in main)
+    if name is None or sample_idx is None or prompt_type is None:
+        return gr.Textbox(label="Response", value="", visible=True), gr.Slider(minimum=0.01, maximum=0.15, step=0.001, label="Voxel Size", value=0.02)
+    DATASET = name.split('-')[1].replace(" ", "")
+    TYPE = configs.VOXEL[prompt_type.lower()]
+    if DATASET in "S3DIS ScanNet":
+        vx_ = 0.02
+    elif DATASET == 'Objaverse':
+        vx_ = configs.Objaverse_samples[sample_idx][TYPE][prompt_idx]
+    elif DATASET == 'KITTI':
+        vx_ = configs.KITTI_samples[sample_idx][TYPE][prompt_idx]
+    elif DATASET == 'Semantic3D':
+        vx_ = configs.Semantic3D_samples[sample_idx][TYPE][prompt_idx]
+
+    return gr.Textbox(label="Response", value="", visible=True), gr.Slider(minimum=0.01, maximum=0.15, step=0.001, label="Voxel Size", value=vx_)
+
+
+def main():
+    # NOTE: this first title string is unused; it is overwritten by the assignment below
+    title = """<h1 style="font-variant: small-caps; font-weight: bold; text-align: center;" align="center">SAM2Point</h1>
+    <h3 align="center"><b>Segment Any 3D as Videos in Zero-shot and Promptable Manners</b></h3>
+    <br>
+    """
+    title = """
+    <h1 style="text-align: center;">
+      <div style="width: 1.2em; height: 1.2em; display: inline-block;"><img src="https://github.com/ZiyuGuo99/ZiyuGuo99.github.io/blob/main/assets/img/logo.png?raw=true" style='width: 100%; height: 100%; object-fit: contain;' /></div>
+      <span style="font-variant: small-caps; font-weight: bold;">Sam2Point</span>
+    </h1>
+    <h3 align="center"><span style="font-variant: small-caps; ">Segment Any 3D as Videos in Zero-shot and Promptable Manners
+    </span></h3>"""
+
+    with gr.Blocks(
+        css="""
+        .contain { display: flex; flex-direction: column; }
+        .gradio-container { height: 100vh !important; }
+        #col_container { height: 100%; }
+        pre {
+            white-space: pre-wrap;       /* Since CSS 2.1 */
+            white-space: -moz-pre-wrap;  /* Mozilla, since 1999 */
+            white-space: -pre-wrap;      /* Opera 4-6 */
+            white-space: -o-pre-wrap;    /* Opera 7 */
+            word-wrap: break-word;       /* Internet Explorer 5.5+ */
+        }""",
+        js="""
+        function refresh() {
+            const url = new URL(window.location);
+
+            if (url.searchParams.get('__theme') !== 'light') {
+                url.searchParams.set('__theme', 'light');
+                window.location.href = url.href;
+            }
+        }""",
+        title="SAM2Point: Segment Any 3D as Videos in Zero-shot and Promptable Manners",
+        theme=gr.themes.Soft()
+    ) as app:
+        gr.HTML(title)
+        with gr.Row():
+            with gr.Column(elem_id="col_container"):
+                sample_dropdown = gr.Dropdown(label="Select 3D Data Type", choices=samples, type="value")
+                scene_dropdown = gr.Radio(label="Select 3D Object/Scene", choices=[], type="index")
+                show_button = gr.Button("Show 3D Scene/Object")
+                prompt_type_dropdown = gr.Radio(label="Select Prompt Type", choices=prompt_types)
+                prompt_sample_dropdown = gr.Radio(label="Select Prompt Example", choices=[], type="index")
+                show_prompt_button = gr.Button("Show Prompt in 3D Scene/Object")
+                # show_button.input(select, [sample_dropdown, scene_dropdown], [])
+            with gr.Column():
+                # vx = gr.Slider(minimum=0.01, maximum=0.15, step=0.001, label="Voxel Size", value=0.02)
+                start_segment_button = gr.Button("Start Segmentation")
+                plot1 = gr.Plot()
+
+                response = gr.Textbox(label="Response")
+
+        sample_dropdown.change(update1, sample_dropdown, [scene_dropdown, response])
+        sample_dropdown.change(update2, [sample_dropdown, scene_dropdown, prompt_type_dropdown], [prompt_sample_dropdown, response])
+        scene_dropdown.change(update2, [sample_dropdown, scene_dropdown, prompt_type_dropdown], [prompt_sample_dropdown, response])
+        prompt_type_dropdown.change(update2, [sample_dropdown, scene_dropdown, prompt_type_dropdown], [prompt_sample_dropdown, response])
+
+        # sample_dropdown.change(update1, sample_dropdown, [scene_dropdown, response, vx])
+        # sample_dropdown.change(update2, [sample_dropdown, scene_dropdown, prompt_type_dropdown], [prompt_sample_dropdown, response, vx])
+        # scene_dropdown.change(update2, [sample_dropdown, scene_dropdown, prompt_type_dropdown], [prompt_sample_dropdown, response, vx])
+        # prompt_type_dropdown.change(update2, [sample_dropdown, scene_dropdown, prompt_type_dropdown], [prompt_sample_dropdown, response, vx])
+        # prompt_sample_dropdown.change(update3, [sample_dropdown, scene_dropdown, prompt_type_dropdown, prompt_sample_dropdown], [response, vx])
+
+        # Logic to handle interactions
+        show_button.click(load_3d_scene, inputs=[sample_dropdown, scene_dropdown], outputs=plot1)
+        show_prompt_button.click(show_prompt_in_3d, inputs=[sample_dropdown, scene_dropdown, prompt_type_dropdown, prompt_sample_dropdown], outputs=plot1)
+        # start_segment_button.click(start_segmentation, inputs=[sample_dropdown, scene_dropdown, prompt_type_dropdown, prompt_sample_dropdown, vx], outputs=[plot1, response])
+        start_segment_button.click(start_segmentation, inputs=[sample_dropdown, scene_dropdown, prompt_type_dropdown, prompt_sample_dropdown], outputs=[plot1, response])
+
+    app.queue(status_update_rate="auto")
+    app.launch(share=True, favicon_path="./logo.png")
+
+
+if __name__ == "__main__":
+    main()
+
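For readers skimming the diff, the figure construction above boils down to a small Plotly recipe: one `Scatter3d` trace for the colored point cloud, a second small trace for the prompt, and a scene with hidden axes. The sketch below is a minimal, standalone approximation rather than the app's code: random points stand in for the `sam2point.dataset` loaders, and per-point colors are passed as CSS `rgb()` strings (an assumption; the app passes color arrays directly).

```python
import numpy as np
import plotly.graph_objects as go

# Stand-in data; the real app loads these via sam2point.dataset.
point = np.random.rand(8000, 3)        # normalized xyz coordinates
color = np.random.rand(8000, 3)        # per-point RGB in [0, 1]
prompt = np.array([[0.5, 0.5, 0.5]])   # a single 3D point prompt

# Per-point colors expressed as CSS color strings.
rgb = ["rgb({}, {}, {})".format(*c) for c in (color * 255).astype(int)]

fig = go.Figure(
    data=[
        go.Scatter3d(
            x=point[:, 0], y=point[:, 1], z=point[:, 2],
            mode="markers",
            marker=dict(size=1, color=rgb, opacity=0.8),
            name="3D Object/Scene",
        ),
        go.Scatter3d(
            x=prompt[:, 0], y=prompt[:, 1], z=prompt[:, 2],
            mode="markers",
            marker=dict(size=5, color="rgb(139, 0, 0)", opacity=1),
            name="Point Prompt",
        ),
    ],
    layout=dict(
        scene=dict(
            xaxis=dict(visible=False),
            yaxis=dict(visible=False),
            zaxis=dict(visible=False),
            aspectratio=dict(x=1, y=1, z=1),
            camera=dict(eye=dict(x=1.5, y=1.5, z=1.5)),
        )
    ),
)
fig.show()
```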
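The Gradio wiring in `main()` relies on a standard pattern: a `.change()` callback returns a freshly configured component, and Gradio uses it to update the wired output. The sketch below shows that pattern in isolation with made-up dataset labels; `update_scenes` and `demo` are hypothetical stand-ins for the app's `update1` and its Blocks instance.

```python
import gradio as gr

# Hypothetical choices; the app keys these off its own `samples` dict.
samples = {
    "3D Indoor Scene - S3DIS": ["Conference Room", "Restroom", "Lobby"],
    "3D Object - Objaverse": ["Plant", "Lock", "Horse"],
}

def update_scenes(dataset_name):
    # Returning a new gr.Radio replaces the choices of the wired output component.
    return gr.Radio(label="Select 3D Object/Scene", choices=samples[dataset_name])

with gr.Blocks() as demo:
    dataset = gr.Dropdown(label="Select 3D Data Type", choices=list(samples))
    scene = gr.Radio(label="Select 3D Object/Scene", choices=[])
    dataset.change(update_scenes, inputs=dataset, outputs=scene)

if __name__ == "__main__":
    demo.launch()
```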