Maikou committed
Commit 89517ee
Parent: 1c2f511
app.py CHANGED
@@ -31,8 +31,8 @@ image_model_config_dict = OrderedDict({
     "ASLDM-256-obj": {
         # "config": "./configs/image_cond_diffuser_asl/image-ASLDM-256.yaml",
         # "ckpt_path": "./checkpoints/image_cond_diffuser_asl/image-ASLDM-256.ckpt",
-        "config": "https://huggingface.co/spaces/Maikou/Michelangelo/tree/main/configs/image_cond_diffuser_asl/image-ASLDM-256.yaml",
-        "ckpt_path": "https://huggingface.co/spaces/Maikou/Michelangelo/tree/main/checkpoints/image_cond_diffuser_asl/image-ASLDM-256.ckpt",
+        "config": "/home/user/app/configs/image_cond_diffuser_asl/image-ASLDM-256.yaml",
+        "ckpt_path": "/home/user/app/checkpoints/image_cond_diffuser_asl/image-ASLDM-256.ckpt",
     },
 })
 
@@ -40,8 +40,8 @@ text_model_config_dict = OrderedDict({
     "ASLDM-256": {
         # "config": "./configs/text_cond_diffuser_asl/text-ASLDM-256.yaml",
         # "ckpt_path": "./checkpoints/text_cond_diffuser_asl/text-ASLDM-256.ckpt",
-        "config": "https://huggingface.co/spaces/Maikou/Michelangelo/tree/main/configs/text_cond_diffuser_asl/text-ASLDM-256.yaml",
-        "ckpt_path": "https://huggingface.co/spaces/Maikou/Michelangelo/tree/main/checkpoints/text_cond_diffuser_asl/text-ASLDM-256.ckpt",
+        "config": "/home/user/app/configs/text_cond_diffuser_asl/text-ASLDM-256.yaml",
+        "ckpt_path": "/home/user/app/checkpoints/text_cond_diffuser_asl/text-ASLDM-256.ckpt",
     },
 })
 
@@ -313,7 +313,7 @@ with gr.Blocks() as app:
     gr.Markdown("2. Note that the Image-conditioned model is trained on multiple 3D datasets like ShapeNet and Objaverse")
     gr.Markdown("3. We provide some examples for you to try. You can also upload images or text as input.")
     gr.Markdown("4. Welcome to share your amazing results with us, and thanks for your interest in our work!")
-
+    print(os.path.abspath(os.path.dirname(__file__)))
     with gr.Row():
         with gr.Column():
 
@@ -360,7 +360,7 @@ with gr.Blocks() as app:
     file_out = gr.File(label="Files", visible=False)
 
     outputs = [model_3d, file_out]
-    print(os.path.abspath(os.path.dirname(__file__)))
+
     img.upload(disable_cache, outputs=cache_dir)
     examples.select(set_cache, outputs=[img, cache_dir])
     print(f'line:404: {cache_dir}')
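
Note: the replaced values were huggingface.co/.../tree/main/... URLs, which resolve to the Hub's web file browser rather than to a raw file or a local path, so the YAML and checkpoint loaders could not open them. Inside a running Space the repository is checked out at /home/user/app, which is what the new absolute paths assume. A minimal sketch of a more portable variant (an illustration, not part of this commit) resolves the paths relative to app.py itself:

from collections import OrderedDict
import os

# Sketch: resolve config/checkpoint paths relative to app.py itself
# instead of hardcoding the Space's /home/user/app checkout location.
ROOT = os.path.abspath(os.path.dirname(__file__))

image_model_config_dict = OrderedDict({
    "ASLDM-256-obj": {
        "config": os.path.join(ROOT, "configs/image_cond_diffuser_asl/image-ASLDM-256.yaml"),
        "ckpt_path": os.path.join(ROOT, "checkpoints/image_cond_diffuser_asl/image-ASLDM-256.ckpt"),
    },
})

This keeps the same dict shape working both locally and on Spaces; the print(os.path.abspath(os.path.dirname(__file__))) added at line 316 already logs that base directory at startup.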
configs/image_cond_diffuser_asl/image-ASLDM-256.yaml CHANGED
@@ -24,7 +24,7 @@ model:
       target: michelangelo.models.tsal.clip_asl_module.CLIPAlignedShapeAsLatentModule
       params:
         # clip_model_version: "./checkpoints/clip/clip-vit-large-patch14"
-        clip_model_version: "https://huggingface.co/spaces/Maikou/Michelangelo/tree/main/checkpoints/clip/clip-vit-large-patch14"
+        clip_model_version: "/home/user/app/checkpoints/clip/clip-vit-large-patch14"
 
     loss_cfg:
       target: torch.nn.Identity
@@ -33,7 +33,7 @@ model:
       target: michelangelo.models.conditional_encoders.encoder_factory.FrozenCLIPImageGridEmbedder
       params:
         # version: "./checkpoints/clip/clip-vit-large-patch14"
-        version: "https://huggingface.co/spaces/Maikou/Michelangelo/tree/main/checkpoints/clip/clip-vit-large-patch14"
+        version: "/home/user/app/checkpoints/clip/clip-vit-large-patch14"
       zero_embedding_radio: 0.1
 
     first_stage_key: "surface"
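
Note: clip_model_version and version are presumably handed by the encoder factory to a from_pretrained-style loader, and such loaders accept a Hub repo id or a local directory, not a /tree/main web URL, which is why the local path works. A minimal check of that path (assuming the directory is a standard CLIP snapshot with config.json, weights, and tokenizer files, and that transformers sits underneath; neither is confirmed by this diff):

# Assumption: checkpoints/clip/clip-vit-large-patch14 is a standard
# Hugging Face CLIP snapshot and the encoder factory loads it via
# transformers' from_pretrained.
from transformers import CLIPModel, CLIPProcessor

local_dir = "/home/user/app/checkpoints/clip/clip-vit-large-patch14"
model = CLIPModel.from_pretrained(local_dir)        # local directory, no Hub lookup
processor = CLIPProcessor.from_pretrained(local_dir)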
configs/text_cond_diffuser_asl/text-ASLDM-256.yaml CHANGED
@@ -23,7 +23,7 @@ model:
     aligned_module_cfg:
       target: michelangelo.models.tsal.clip_asl_module.CLIPAlignedShapeAsLatentModule
       params:
-        clip_model_version: "https://huggingface.co/spaces/Maikou/Michelangelo/tree/main/checkpoints/clip/clip-vit-large-patch14"
+        clip_model_version: "/home/user/app/checkpoints/clip/clip-vit-large-patch14"
 
     loss_cfg:
       target: torch.nn.Identity
@@ -31,7 +31,7 @@ model:
     cond_stage_config:
       target: michelangelo.models.conditional_encoders.encoder_factory.FrozenAlignedCLIPTextEmbedder
       params:
-        version: "https://huggingface.co/spaces/Maikou/Michelangelo/tree/main/checkpoints/clip/clip-vit-large-patch14"
+        version: "/home/user/app/checkpoints/clip/clip-vit-large-patch14"
       zero_embedding_radio: 0.1
       max_length: 77
 
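
Note: the same substitution in the text config ties both YAML files to the /home/user/app layout of a running Space. If the configs should also resolve outside Spaces, one hypothetical alternative (not what this commit does) is to materialize the files from the Hub with huggingface_hub and point the configs at the returned cache path:

# Hypothetical alternative: download the Space's configs and checkpoints
# into the local huggingface_hub cache instead of assuming /home/user/app.
from huggingface_hub import snapshot_download

local_root = snapshot_download(
    repo_id="Maikou/Michelangelo",
    repo_type="space",                              # the files live in a Space repo
    allow_patterns=["configs/*", "checkpoints/*"],  # skip the rest of the repo
)
clip_dir = f"{local_root}/checkpoints/clip/clip-vit-large-patch14"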