jschen committed
Commit 3f3a332
1 Parent(s): 936212f

update app.py

Files changed (1)
  1. app.py +14 -18
app.py CHANGED
@@ -9,20 +9,21 @@ import random
 import gradio as gr
 import numpy as np
 import uuid
-from diffusers import PixArtAlphaPipeline, LatentConsistencyModelPipeline, LCMScheduler
+from diffusers import PixArtAlphaPipeline, LCMScheduler
 import torch
 from typing import Tuple
 from datetime import datetime
 
 
-DESCRIPTION = """![Logo](./pixart-lcm.png)
-# PixArt-Alpha LCM 1024px
-#### [PixArt-Alpha 1024px](https://github.com/PixArt-alpha/PixArt-alpha) is a transformer-based text-to-image diffusion system trained on text embeddings from T5. This demo uses the [PixArt-alpha/PixArt-XL-2-1024-MS](https://huggingface.co/PixArt-alpha/PixArt-XL-2-1024-MS) checkpoint.
+DESCRIPTION = """![Logo](https://raw.githubusercontent.com/PixArt-alpha/PixArt-alpha.github.io/master/static/images/pixart-lcm.png)
+# PixArt-LCM 1024px
+#### [PixArt-Alpha 1024px](https://github.com/PixArt-alpha/PixArt-alpha) is a transformer-based text-to-image diffusion system trained on text embeddings from T5. This demo uses the [PixArt-alpha/PixArt-LCM-XL-2-1024-MS](https://huggingface.co/PixArt-alpha/PixArt-LCM-XL-2-1024-MS) checkpoint.
+#### [LCM](https://github.com/luosiallen/latent-consistency-model) is a diffusion distillation method that predicts the PF-ODE's solution directly in latent space, achieving very fast inference in a few steps.
 #### English prompts ONLY; 提示词仅限英文
 Don't want to queue? Try [OpenXLab](https://openxlab.org.cn/apps/detail/PixArt-alpha/PixArt-alpha) or [Google Colab Demo](https://colab.research.google.com/drive/1jZ5UZXk7tcpTfVwnX33dDuefNMcnW9ME?usp=sharing).
 """
 if not torch.cuda.is_available():
-    DESCRIPTION += "\n<p>Running on CPU �� This demo does not work on CPU.</p>"
+    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
 
 MAX_SEED = np.iinfo(np.int32).max
 CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES", "1") == "1"
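The added description advertises few-step inference. A minimal sketch of what that looks like with the checkpoint this commit points at, assuming the usual LCM recipe of 4 denoising steps with guidance disabled (neither value appears in this diff):

```python
# Minimal sketch, assuming a CUDA GPU and the standard LCM sampling recipe;
# num_inference_steps=4 and guidance_scale=0.0 are assumptions, not from this diff.
import torch
from diffusers import PixArtAlphaPipeline

pipe = PixArtAlphaPipeline.from_pretrained(
    "PixArt-alpha/PixArt-LCM-XL-2-1024-MS",
    torch_dtype=torch.float16,
    use_safetensors=True,
).to("cuda")

# LCM-distilled models denoise in a handful of steps and skip
# classifier-free guidance, which is what makes the demo fast.
image = pipe(
    prompt="a small cactus with a happy face in the Sahara desert",
    num_inference_steps=4,
    guidance_scale=0.0,
).images[0]
image.save("sample.png")
```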
@@ -103,16 +104,11 @@ def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str
 if torch.cuda.is_available():
 
     pipe = PixArtAlphaPipeline.from_pretrained(
-        # "PixArt-alpha/PixArt-LCM-XL-2-1024-MS",
-        'output_cv/pixartlcm-xl2-img1024_ft_singlebr_MJ1-5filter_vae_lr2e5_ema80/checkpoints/epoch_1_step_1600_diffusers',
+        "PixArt-alpha/PixArt-LCM-XL-2-1024-MS",
         torch_dtype=torch.float16,
         use_safetensors=True,
     )
 
-    if os.getenv('CONSISTENCY_DECODER', False):
-        print("Using DALL-E 3 Consistency Decoder")
-        pipe.vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder", torch_dtype=torch.float16)
-
     if ENABLE_CPU_OFFLOAD:
         pipe.enable_model_cpu_offload()
     else:
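The block deleted above gated the DALL-E 3 consistency decoder behind an environment variable. One caveat if it were ever restored: `os.getenv` returns a string, so `os.getenv('CONSISTENCY_DECODER', False)` is truthy for any non-empty value, including "0". A sketch with an explicit comparison, assuming `pipe` is the pipeline loaded above:

```python
import os

import torch
from diffusers import ConsistencyDecoderVAE

# os.getenv returns a string, so compare explicitly: with the original
# truthiness check, CONSISTENCY_DECODER="0" would still enable the swap.
if os.getenv("CONSISTENCY_DECODER", "0") == "1":
    print("Using DALL-E 3 Consistency Decoder")
    # Replace the pipeline's VAE with OpenAI's consistency decoder
    # (pipe is assumed to be the PixArtAlphaPipeline loaded above).
    pipe.vae = ConsistencyDecoderVAE.from_pretrained(
        "openai/consistency-decoder", torch_dtype=torch.float16
    )
```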
@@ -211,7 +207,13 @@ with gr.Blocks(css="scripts/style.css") as demo:
         result = gr.Gallery(label="Result", columns=NUM_IMAGES_PER_PROMPT, show_label=False)
     with gr.Accordion("Advanced options", open=False):
         with gr.Row():
-            use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=False, visible=False)
+            use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=False, visible=True)
+            negative_prompt = gr.Text(
+                label="Negative prompt",
+                max_lines=1,
+                placeholder="Enter a negative prompt",
+                visible=True,
+            )
         style_selection = gr.Radio(
             show_label=True,
             container=True,
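This hunk makes the negative-prompt controls visible by default and moves the textbox next to its checkbox. In Gradio, such a checkbox is typically wired to the textbox's visibility with a change listener; a sketch of that common pattern (the listener itself is not part of this diff):

```python
import gradio as gr

with gr.Blocks() as demo:
    use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=False)
    negative_prompt = gr.Text(
        label="Negative prompt",
        max_lines=1,
        placeholder="Enter a negative prompt",
        visible=True,
    )
    # Show or hide the textbox whenever the checkbox flips; gr.update sends
    # the new `visible` property back to the frontend.
    use_negative_prompt.change(
        fn=lambda checked: gr.update(visible=checked),
        inputs=use_negative_prompt,
        outputs=negative_prompt,
        queue=False,
    )
```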
@@ -220,12 +222,6 @@ with gr.Blocks(css="scripts/style.css") as demo:
             value=DEFAULT_STYLE_NAME,
             label="Image Style",
         )
-        negative_prompt = gr.Text(
-            label="Negative prompt",
-            max_lines=1,
-            placeholder="Enter a negative prompt",
-            visible=False,
-        )
         seed = gr.Slider(
             label="Seed",
             minimum=0,
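The seed slider's bounds come from the MAX_SEED constant defined at the top of the file. For completeness, a sketch of the randomize-on-demand helper such sliders are usually paired with; `randomize_seed_fn` is hypothetical here and not shown in this diff:

```python
import random

import numpy as np

# Matches MAX_SEED in app.py: the largest 32-bit signed integer.
MAX_SEED = np.iinfo(np.int32).max

def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    """Return a fresh random seed when requested, otherwise keep the given one."""
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return seed
```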
 