Spaces:
Running
on
A10G
Running
on
A10G
Lawrence-cj
committed on
Commit
•
227fc7d
1
Parent(s):
a731861
update app.py
Browse files
app.py
CHANGED
@@ -7,7 +7,7 @@ import uuid
|
|
7 |
import gradio as gr
|
8 |
import numpy as np
|
9 |
import uuid
|
10 |
-
from diffusers import PixArtAlphaPipeline,
|
11 |
import torch
|
12 |
from typing import Tuple
|
13 |
from datetime import datetime
|
@@ -19,6 +19,7 @@ DESCRIPTION = """![Logo](https://raw.githubusercontent.com/PixArt-alpha/PixArt-a
|
|
19 |
#### [LCMs](https://github.com/luosiallen/latent-consistency-model) is a diffusion distillation method which predict PF-ODE's solution directly in latent space, achieving super fast inference with few steps.
|
20 |
#### English prompts ONLY; 提示词仅限英文
|
21 |
### <span style='color: red;'> Don't want to queue or Don't get satisfied results? Try original demo: [PixArt-alpha](https://huggingface.co/spaces/PixArt-alpha/PixArt-alpha) [OpenXLab](https://openxlab.org.cn/apps/detail/PixArt-alpha/PixArt-alpha) or [Google Colab Demo](https://colab.research.google.com/drive/1jZ5UZXk7tcpTfVwnX33dDuefNMcnW9ME?usp=sharing).
|
|
|
22 |
"""
|
23 |
if not torch.cuda.is_available():
|
24 |
DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
|
@@ -250,7 +251,7 @@ with gr.Blocks(css="style.css") as demo:
|
|
250 |
minimum=1,
|
251 |
maximum=30,
|
252 |
step=1,
|
253 |
-
value=
|
254 |
)
|
255 |
gr.Examples(
|
256 |
examples=examples,
|
|
|
7 |
import gradio as gr
|
8 |
import numpy as np
|
9 |
import uuid
|
10 |
+
from diffusers import PixArtAlphaPipeline, LCMScheduler
|
11 |
import torch
|
12 |
from typing import Tuple
|
13 |
from datetime import datetime
|
|
|
19 |
#### [LCMs](https://github.com/luosiallen/latent-consistency-model) is a diffusion distillation method which predict PF-ODE's solution directly in latent space, achieving super fast inference with few steps.
|
20 |
#### English prompts ONLY; 提示词仅限英文
|
21 |
### <span style='color: red;'> Don't want to queue or Don't get satisfied results? Try original demo: [PixArt-alpha](https://huggingface.co/spaces/PixArt-alpha/PixArt-alpha) [OpenXLab](https://openxlab.org.cn/apps/detail/PixArt-alpha/PixArt-alpha) or [Google Colab Demo](https://colab.research.google.com/drive/1jZ5UZXk7tcpTfVwnX33dDuefNMcnW9ME?usp=sharing).
|
22 |
+
### Feel free to change LCM inference steps in range [4, 10] according to your needs.
|
23 |
"""
|
24 |
if not torch.cuda.is_available():
|
25 |
DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
|
|
|
251 |
minimum=1,
|
252 |
maximum=30,
|
253 |
step=1,
|
254 |
+
value=6,
|
255 |
)
|
256 |
gr.Examples(
|
257 |
examples=examples,
|