MattSymetry committed on
Commit
fa2a575
1 Parent(s): 0786f28
Files changed (6)
  1. README.md +5 -5
  2. app.py +287 -0
  3. images/chair.png +0 -0
  4. images/corgi.png +0 -0
  5. images/cube_stack.jpg +0 -0
  6. requirements.txt +5 -0
README.md CHANGED
@@ -1,10 +1,10 @@
 ---
-title: Metry Mattia
-emoji: 📊
-colorFrom: gray
-colorTo: gray
+title: Point-e Demo
+emoji: 🐢
+colorFrom: yellow
+colorTo: blue
 sdk: gradio
-sdk_version: 3.21.0
+sdk_version: 3.17.1
 app_file: app.py
 pinned: false
 ---
app.py ADDED
@@ -0,0 +1,287 @@
+import os
+from PIL import Image
+import torch
+
+from point_e.diffusion.configs import DIFFUSION_CONFIGS, diffusion_from_config
+from point_e.diffusion.sampler import PointCloudSampler
+from point_e.models.download import load_checkpoint
+from point_e.models.configs import MODEL_CONFIGS, model_from_config
+from point_e.util.plotting import plot_point_cloud
+from point_e.util.pc_to_mesh import marching_cubes_mesh
+
+import skimage.measure
+
+from pyntcloud import PyntCloud
+import matplotlib.colors
+import plotly.graph_objs as go
+
+import trimesh
+
+import gradio as gr
+
+
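+# Global status string; set_state() logs progress while models load and sample.
+# Sampling runs on the GPU when available; CPU works but is very slow.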
+state = ""
+device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+
+def set_state(s):
+    print(s)
+    global state
+    state = s
+
+def get_state():
+    return state
+
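+# Text-conditioned base model: turns a prompt into a coarse 1024-point cloud.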
+set_state('Creating txt2mesh model...')
+t2m_name = 'base40M-textvec'
+t2m_model = model_from_config(MODEL_CONFIGS[t2m_name], device)
+t2m_model.eval()
+base_diffusion_t2m = diffusion_from_config(DIFFUSION_CONFIGS[t2m_name])
+
+set_state('Downloading txt2mesh checkpoint...')
+t2m_model.load_state_dict(load_checkpoint(t2m_name, device))
+
+
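+# Image-conditioned base models are loaded on demand; only the selected one is kept in memory.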
+def load_img2mesh_model(model_name):
+    set_state(f'Creating img2mesh model {model_name}...')
+    i2m_name = model_name
+    i2m_model = model_from_config(MODEL_CONFIGS[i2m_name], device)
+    i2m_model.eval()
+    base_diffusion_i2m = diffusion_from_config(DIFFUSION_CONFIGS[i2m_name])
+
+    set_state(f'Downloading img2mesh checkpoint {model_name}...')
+    i2m_model.load_state_dict(load_checkpoint(i2m_name, device))
+
+    return i2m_model, base_diffusion_i2m
+
+img2mesh_model_name = 'base40M' #'base300M' #'base1B'
+i2m_model, base_diffusion_i2m = load_img2mesh_model(img2mesh_model_name)
+
+
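+# Upsampler diffusion model: grows the 1024-point base output to 4096 points.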
+set_state('Creating upsample model...')
+upsampler_model = model_from_config(MODEL_CONFIGS['upsample'], device)
+upsampler_model.eval()
+upsampler_diffusion = diffusion_from_config(DIFFUSION_CONFIGS['upsample'])
+
+set_state('Downloading upsampler checkpoint...')
+upsampler_model.load_state_dict(load_checkpoint('upsample', device))
+
+set_state('Creating SDF model...')
+sdf_name = 'sdf'
+sdf_model = model_from_config(MODEL_CONFIGS[sdf_name], device)
+sdf_model.eval()
+
+set_state('Loading SDF model...')
+sdf_model.load_state_dict(load_checkpoint(sdf_name, device))
+
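+# Load the public Stable Diffusion Space as a callable for the "Text to Image to 3D" tab.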
+stable_diffusion = gr.Blocks.load(name="spaces/runwayml/stable-diffusion-v1-5")
+
+
+set_state('')
+
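+# Two-stage sampler: the base model (text- or image-conditioned) generates
+# 1024 points, then the upsampler fills in the remaining 3072.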
+def get_sampler(model_name, txt2obj, guidance_scale):
+
+    global img2mesh_model_name
+    global base_diffusion_i2m
+    global i2m_model
+    if model_name != img2mesh_model_name:
+        img2mesh_model_name = model_name
+        i2m_model, base_diffusion_i2m = load_img2mesh_model(model_name)
+
+    return PointCloudSampler(
+        device=device,
+        models=[t2m_model if txt2obj else i2m_model, upsampler_model],
+        diffusions=[base_diffusion_t2m if txt2obj else base_diffusion_i2m, upsampler_diffusion],
+        num_points=[1024, 4096 - 1024],
+        aux_channels=['R', 'G', 'B'],
+        guidance_scale=[guidance_scale, 0.0 if txt2obj else guidance_scale],
+        model_kwargs_key_filter=('texts', '') if txt2obj else ("*",)
+    )
+
+def generate_txt2img(prompt):
+
+    prompt = f"a 3d rendering of {prompt}, full view, white background"
+    gallery_dir = stable_diffusion(prompt, fn_index=2)
+    imgs = [os.path.join(gallery_dir, img) for img in os.listdir(gallery_dir) if os.path.splitext(img)[1] == '.jpg']
+
+    return imgs[0], gr.update(visible=True)
+
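+# Full pipeline: sample a point cloud from text or image, save it as .ply,
+# then extract a mesh via marching cubes over the SDF model.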
+def generate_3D(input, model_name='base40M', guidance_scale=3.0, grid_size=32):
+
+    set_state('Entered generate function...')
+
+    if isinstance(input, Image.Image):
+        input = prepare_img(input)
+
+    # if input is a string, it's a text prompt
+    sampler = get_sampler(model_name, txt2obj=isinstance(input, str), guidance_scale=guidance_scale)
+
+    # Produce a sample from the model.
+    set_state('Sampling...')
+    samples = None
+    kw_args = dict(texts=[input]) if isinstance(input, str) else dict(images=[input])
+    for x in sampler.sample_batch_progressive(batch_size=1, model_kwargs=kw_args):
+        samples = x
+
+    set_state('Converting to point cloud...')
+    pc = sampler.output_to_point_clouds(samples)[0]
+
+    set_state('Saving point cloud...')
+    with open("point_cloud.ply", "wb") as f:
+        pc.write_ply(f)
+
+    set_state('Converting to mesh...')
+    save_ply(pc, 'mesh.ply', grid_size)
+
+    set_state('')
+
+    return pc_to_plot(pc), ply_to_obj('mesh.ply', '3d_model.obj'), gr.update(value=['3d_model.obj', 'mesh.ply', 'point_cloud.ply'], visible=True)
+
+def prepare_img(img):
+
+    w, h = img.size
+    if w > h:
+        img = img.crop(((w - h) // 2, 0, w - (w - h) // 2, h))
+    else:
+        img = img.crop((0, (h - w) // 2, w, h - (h - w) // 2))
+
+    # resize to 256x256
+    img = img.resize((256, 256))
+
+    return img
+
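+# Plot the colored point cloud as an interactive plotly 3D scatter.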
+def pc_to_plot(pc):
+
+    return go.Figure(
+        data=[
+            go.Scatter3d(
+                x=pc.coords[:,0], y=pc.coords[:,1], z=pc.coords[:,2],
+                mode='markers',
+                marker=dict(
+                    size=2,
+                    color=['rgb({},{},{})'.format(r,g,b) for r,g,b in zip(pc.channels["R"], pc.channels["G"], pc.channels["B"])],
+                )
+            )
+        ],
+        layout=dict(
+            scene=dict(xaxis=dict(visible=False), yaxis=dict(visible=False), zaxis=dict(visible=False))
+        ),
+    )
+
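+# Convert the .ply mesh to .obj with trimesh for display in the gr.Model3D viewer.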
+def ply_to_obj(ply_file, obj_file):
+    mesh = trimesh.load(ply_file)
+    mesh.export(obj_file)
+
+    return obj_file
+
+def save_ply(pc, file_name, grid_size):
+
+    # Produce a mesh (with vertex colors)
+    mesh = marching_cubes_mesh(
+        pc=pc,
+        model=sdf_model,
+        batch_size=4096,
+        grid_size=grid_size, # increase to 128 for resolution used in evals
+        fill_vertex_channels=True,
+        progress=True,
+    )
+
+    # Write the mesh to a PLY file to import into some other program.
+    with open(file_name, 'wb') as f:
+        mesh.write_ply(f)
+
+
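+# Gradio UI: three input tabs (text, image, text-to-image-to-3D) on the left;
+# point-cloud plot, 3D model viewer, and downloadable files on the right.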
+with gr.Blocks() as app:
+    gr.Markdown("## Point-E text-to-3D Demo")
+    gr.Markdown("This is a demo for [Point-E: A System for Generating 3D Point Clouds from Complex Prompts](https://arxiv.org/abs/2212.08751) by OpenAI. Check out the [GitHub repo](https://github.com/openai/point-e) for more information.")
+    gr.HTML("""To skip the queue you can duplicate this space:
+            <br><a href="https://huggingface.co/spaces/anzorq/point-e_demo?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a>
+            <br>Don't forget to change space hardware to <b>GPU</b> after duplicating it.""")
+
+    with gr.Row():
+        with gr.Column():
+            with gr.Tab("Text to 3D"):
+                prompt = gr.Textbox(label="Prompt", placeholder="A cactus in a pot")
+                btn_generate_txt2obj = gr.Button(value="Generate")
+
+            with gr.Tab("Image to 3D"):
+                img = gr.Image(label="Image")
+                gr.Markdown("Best results with images of 3D objects with no shadows on a white background.")
+                btn_generate_img2obj = gr.Button(value="Generate")
+
+            with gr.Tab("Text to Image to 3D"):
+                gr.Markdown("Generate an image with Stable Diffusion, then convert it to 3D. Just enter the object you want to generate.")
+                prompt_sd = gr.Textbox(label="Prompt", placeholder="a 3d rendering of [your prompt], full view, white background")
+                btn_generate_txt2sd = gr.Button(value="Generate image")
+                img_sd = gr.Image(label="Image")
+                btn_generate_sd2obj = gr.Button(value="Convert to 3D", visible=False)
+
+            with gr.Accordion("Advanced settings", open=False):
+                dropdown_models = gr.Dropdown(label="Model", value="base40M", choices=["base40M", "base300M"]) #, "base1B"])
+                guidance_scale = gr.Slider(label="Guidance scale", value=3.0, minimum=3.0, maximum=10.0, step=0.1)
+                grid_size = gr.Slider(label="Grid size (for .obj 3D model)", value=32, minimum=16, maximum=128, step=16)
+
+        with gr.Column():
+            plot = gr.Plot(label="Point cloud")
+            # btn_pc_to_obj = gr.Button(value="Convert to OBJ", visible=False)
+            model_3d = gr.Model3D(value=None)
+            file_out = gr.File(label="Files", visible=False)
+
+    # state_info = gr.Textbox(label="State", show_label=False).style(container=False)
+
+
+    # inputs = [dropdown_models, prompt, img, guidance_scale, grid_size]
+    outputs = [plot, model_3d, file_out]
+
+    prompt.submit(generate_3D, inputs=[prompt, dropdown_models, guidance_scale, grid_size], outputs=outputs)
+    btn_generate_txt2obj.click(generate_3D, inputs=[prompt, dropdown_models, guidance_scale, grid_size], outputs=outputs, api_name="generate_txt2obj")
+
+    btn_generate_img2obj.click(generate_3D, inputs=[img, dropdown_models, guidance_scale, grid_size], outputs=outputs, api_name="generate_img2obj")
+
+    prompt_sd.submit(generate_txt2img, inputs=prompt_sd, outputs=[img_sd, btn_generate_sd2obj])
+    btn_generate_txt2sd.click(generate_txt2img, inputs=prompt_sd, outputs=[img_sd, btn_generate_sd2obj], queue=False)
+    btn_generate_sd2obj.click(generate_3D, inputs=[img, dropdown_models, guidance_scale, grid_size], outputs=outputs)
+
+    # btn_pc_to_obj.click(ply_to_obj, inputs=plot, outputs=[model_3d, file_out])
+
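+    # Example outputs are cached (precomputed at startup), so clicking an example returns instantly.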
+    gr.Examples(
+        examples=[
+            ["a cactus in a pot"],
+            ["a round table with floral tablecloth"],
+            ["a red kettle"],
+            ["a vase with flowers"],
+            ["a sports car"],
+            ["a man"],
+        ],
+        inputs=[prompt],
+        outputs=outputs,
+        fn=generate_3D,
+        cache_examples=True
+    )
+
+    gr.Examples(
+        examples=[
+            ["images/corgi.png"],
+            ["images/cube_stack.jpg"],
+            ["images/chair.png"],
+        ],
+        inputs=[img],
+        outputs=outputs,
+        fn=generate_3D,
+        cache_examples=True
+    )
+
+    # app.load(get_state, inputs=[], outputs=state_info, every=0.5, show_progress=False)
+
+    gr.HTML("""
+        <br><br>
+        <div style="border-top: 1px solid #303030;">
+            <br>
+            <p>Space by:<br>
+            <a href="https://twitter.com/hahahahohohe"><img src="https://img.shields.io/twitter/follow/hahahahohohe?label=%40anzorq&style=social" alt="Twitter Follow"></a><br>
+            <a href="https://github.com/qunash"><img src="https://img.shields.io/github/followers/qunash?style=social" alt="GitHub followers"></a></p><br>
+            <a href="https://www.buymeacoffee.com/anzorq" target="_blank"><img src="https://cdn.buymeacoffee.com/buttons/v2/default-yellow.png" alt="Buy Me A Coffee" style="height: 30px !important;width: 102px !important;" ></a><br><br>
+            <p><img src="https://visitor-badge.glitch.me/badge?page_id=anzorq.point-e_demo" alt="visitors"></p>
+        </div>
+    """)
+
+app.queue(max_size=250, concurrency_count=6).launch()
images/chair.png ADDED
images/corgi.png ADDED
images/cube_stack.jpg ADDED
requirements.txt ADDED
@@ -0,0 +1,5 @@
+git+https://github.com/openai/point-e@main
+pyntcloud
+plotly
+trimesh
+