Update

Files changed:
- .pre-commit-config.yaml  +26 -12
- .style.yapf  +0 -5
- .vscode/settings.json  +11 -8
- README.md  +1 -1
- app.py  +92 -77
- requirements.txt  +6 -6
.pre-commit-config.yaml CHANGED

@@ -1,6 +1,6 @@
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.
+    rev: v4.4.0
     hooks:
       - id: check-executables-have-shebangs
       - id: check-json
@@ -8,29 +8,43 @@ repos:
       - id: check-shebang-scripts-are-executable
       - id: check-toml
       - id: check-yaml
-      - id: double-quote-string-fixer
       - id: end-of-file-fixer
       - id: mixed-line-ending
-        args: [
+        args: ["--fix=lf"]
       - id: requirements-txt-fixer
       - id: trailing-whitespace
   - repo: https://github.com/myint/docformatter
-    rev: v1.
+    rev: v1.7.5
     hooks:
       - id: docformatter
-        args: [
+        args: ["--in-place"]
   - repo: https://github.com/pycqa/isort
     rev: 5.12.0
     hooks:
       - id: isort
+        args: ["--profile", "black"]
   - repo: https://github.com/pre-commit/mirrors-mypy
-    rev:
+    rev: v1.5.1
     hooks:
       - id: mypy
-        args: [
-        additional_dependencies: [
-  - repo: https://github.com/
-    rev:
+        args: ["--ignore-missing-imports"]
+        additional_dependencies: ["types-python-slugify", "types-requests", "types-PyYAML"]
+  - repo: https://github.com/psf/black
+    rev: 23.9.1
     hooks:
-
-
+      - id: black
+        language_version: python3.10
+        args: ["--line-length", "119"]
+  - repo: https://github.com/kynan/nbstripout
+    rev: 0.6.1
+    hooks:
+      - id: nbstripout
+        args: ["--extra-keys", "metadata.interpreter metadata.kernelspec cell.metadata.pycharm"]
+  - repo: https://github.com/nbQA-dev/nbQA
+    rev: 1.7.0
+    hooks:
+      - id: nbqa-black
+      - id: nbqa-pyupgrade
+        args: ["--py37-plus"]
+      - id: nbqa-isort
+        args: ["--float-to-top"]
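The hooks above are driven by the pre-commit CLI. As a minimal sketch only (not part of this commit, and assuming `pre-commit` is installed in the environment), they can be registered and run once over the whole repository like this:

```python
# Hedged sketch: invoke the pre-commit CLI (pip install pre-commit) from Python.
import subprocess

subprocess.run(["pre-commit", "install"], check=True)             # register the git commit hook
subprocess.run(["pre-commit", "run", "--all-files"], check=True)  # run black, isort, mypy, nbQA, ... once
```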
.style.yapf DELETED

@@ -1,5 +0,0 @@
-[style]
-based_on_style = pep8
-blank_line_before_nested_class_or_def = false
-spaces_before_comment = 2
-split_before_logical_operator = true
.vscode/settings.json CHANGED

@@ -1,18 +1,21 @@
 {
-    "python.linting.enabled": true,
-    "python.linting.flake8Enabled": true,
-    "python.linting.pylintEnabled": false,
-    "python.linting.lintOnSave": true,
-    "python.formatting.provider": "yapf",
-    "python.formatting.yapfArgs": [
-        "--style={based_on_style: pep8, indent_width: 4, blank_line_before_nested_class_or_def: false, spaces_before_comment: 2, split_before_logical_operator: true}"
-    ],
     "[python]": {
+        "editor.defaultFormatter": "ms-python.black-formatter",
         "editor.formatOnType": true,
         "editor.codeActionsOnSave": {
             "source.organizeImports": true
         }
     },
+    "black-formatter.args": [
+        "--line-length=119"
+    ],
+    "isort.args": ["--profile", "black"],
+    "flake8.args": [
+        "--max-line-length=119"
+    ],
+    "ruff.args": [
+        "--line-length=119"
+    ],
     "editor.formatOnSave": true,
     "files.insertFinalNewline": true
 }
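These editor settings mirror the pre-commit hooks: Black as the default formatter, isort with the "black" profile, and a shared 119-character line length for Black, flake8, and ruff. For illustration only (assuming `black` and `isort` are installed), the same options can be applied programmatically:

```python
# Hedged sketch: format a snippet with the same options the editor and hooks use.
import black
import isort

snippet = "import sys\nimport os\nvalues={ 'a':1,'b':2 }\n"
snippet = isort.code(snippet, profile="black")                         # isort with the "black" profile
snippet = black.format_str(snippet, mode=black.Mode(line_length=119))  # Black at 119 columns
print(snippet)
```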
README.md CHANGED

@@ -4,7 +4,7 @@ emoji: 📚
 colorFrom: purple
 colorTo: green
 sdk: gradio
-sdk_version: 3.
+sdk_version: 3.44.4
 app_file: app.py
 pinned: false
 license: mit
app.py CHANGED

@@ -8,37 +8,34 @@ import random
 import gradio as gr
 import numpy as np
 import PIL.Image
+import spaces
 import torch
 from diffusers import DDPMScheduler, DiffusionPipeline
 
-DESCRIPTION =
+DESCRIPTION = "# Kandinsky 2.1"
 if not torch.cuda.is_available():
-    DESCRIPTION +=
+    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
 
 MAX_SEED = np.iinfo(np.int32).max
-CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv(
-
-
-USE_TORCH_COMPILE = os.getenv('USE_TORCH_COMPILE') == '1'
+CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES") == "1"
+MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "768"))
+USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE") == "1"
 
-device = torch.device(
+device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 if torch.cuda.is_available():
     pipe_prior = DiffusionPipeline.from_pretrained(
-
+        "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
+    )
     pipe_prior.to(device)
 
-    scheduler = DDPMScheduler.from_pretrained(
-        'kandinsky-community/kandinsky-2-1', subfolder='ddpm_scheduler')
+    scheduler = DDPMScheduler.from_pretrained("kandinsky-community/kandinsky-2-1", subfolder="ddpm_scheduler")
     pipe = DiffusionPipeline.from_pretrained(
-
-
-        torch_dtype=torch.float16)
+        "kandinsky-community/kandinsky-2-1", scheduler=scheduler, torch_dtype=torch.float16
+    )
     pipe.to(device)
     if USE_TORCH_COMPILE:
         pipe.unet.to(memory_format=torch.channels_last)
-        pipe.unet = torch.compile(pipe.unet,
-                                  mode='reduce-overhead',
-                                  fullgraph=True)
+        pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
 else:
     pipe_prior = None
     pipe = None

@@ -50,15 +47,18 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
     return seed
 
 
-
-
-
-
-
-
-
-
-
+@spaces.GPU
+def generate(
+    prompt: str,
+    negative_prompt: str = "low quality, bad quality",
+    seed: int = 0,
+    width: int = 768,
+    height: int = 768,
+    guidance_scale_prior: float = 1.0,
+    guidance_scale: float = 4.0,
+    num_inference_steps_prior: int = 50,
+    num_inference_steps: int = 100,
+) -> PIL.Image.Image:
     generator = torch.Generator().manual_seed(seed)
     image_embeds, negative_image_embeds = pipe_prior(
         prompt,

@@ -81,85 +81,98 @@ def generate(prompt: str,
 
 
 examples = [
-
-
-
-
-
-
-
+    "An astronaut riding a horse",
+    "portrait of a young woman, blue eyes, cinematic",
+    "A alien cheeseburger creature eating itself, claymation, cinematic, moody lighting",
+    "bird eye view shot of a full body woman with cyan light orange magenta makeup, digital art, long braided hair her face separated by makeup in the style of yin Yang surrealism, symmetrical face, real image, contrasting tone, pastel gradient background",
+    "A car exploding into colorful dust",
+    "editorial photography of an organic, almost liquid smoke style armchair",
+    "birds eye view of a quilted paper style alien planet landscape, vibrant colours, Cinematic lighting",
 ]
 
-with gr.Blocks(css=
+with gr.Blocks(css="style.css") as demo:
     gr.Markdown(DESCRIPTION)
-    gr.DuplicateButton(
-
-
+    gr.DuplicateButton(
+        value="Duplicate Space for private use",
+        elem_id="duplicate-button",
+        visible=os.getenv("SHOW_DUPLICATE_BUTTON") == "1",
+    )
    with gr.Box():
        with gr.Row():
            prompt = gr.Text(
-                label=
+                label="Prompt",
                show_label=False,
                max_lines=1,
-                placeholder=
+                placeholder="Enter your prompt",
                container=False,
            )
-            run_button = gr.Button(
-        result = gr.Image(label=
-        with gr.Accordion(
+            run_button = gr.Button("Run", scale=0)
+        result = gr.Image(label="Result", show_label=False)
+        with gr.Accordion("Advanced options", open=False):
            negative_prompt = gr.Text(
-                label=
-                value=
+                label="Negative prompt",
+                value="low quality, bad quality",
                max_lines=1,
-                placeholder=
+                placeholder="Enter a negative prompt",
+            )
+            seed = gr.Slider(
+                label="Seed",
+                minimum=0,
+                maximum=MAX_SEED,
+                step=1,
+                value=0,
            )
-
-                minimum=0,
-                maximum=MAX_SEED,
-                step=1,
-                value=0)
-            randomize_seed = gr.Checkbox(label='Randomize seed', value=True)
+            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
            width = gr.Slider(
-                label=
+                label="Width",
                minimum=256,
                maximum=MAX_IMAGE_SIZE,
                step=32,
                value=768,
            )
            height = gr.Slider(
-                label=
+                label="Height",
                minimum=256,
                maximum=MAX_IMAGE_SIZE,
                step=32,
                value=768,
            )
-            guidance_scale_prior = gr.Slider(
-
-
-
-
-
-
-
-
-
+            guidance_scale_prior = gr.Slider(
+                label="Guidance scale for prior",
+                minimum=1,
+                maximum=20,
+                step=0.1,
+                value=4.0,
+            )
+            guidance_scale = gr.Slider(
+                label="Guidance scale",
+                minimum=1,
+                maximum=20,
+                step=0.1,
+                value=4.0,
+            )
            num_inference_steps_prior = gr.Slider(
-                label=
+                label="Number of inference steps for prior",
                minimum=10,
                maximum=100,
                step=1,
-                value=50
-
-
-
-
-
-
-
-
-
-
-
+                value=50,
+            )
+            num_inference_steps = gr.Slider(
+                label="Number of inference steps",
+                minimum=10,
+                maximum=150,
+                step=1,
+                value=100,
+            )
+
+    gr.Examples(
+        examples=examples,
+        inputs=prompt,
+        outputs=result,
+        fn=generate,
+        cache_examples=CACHE_EXAMPLES,
+    )
 
    inputs = [
        prompt,

@@ -182,7 +195,7 @@ with gr.Blocks(css='style.css') as demo:
        fn=generate,
        inputs=inputs,
        outputs=result,
-        api_name=
+        api_name="run",
    )
    negative_prompt.submit(
        fn=randomize_seed_fn,

@@ -208,4 +221,6 @@ with gr.Blocks(css='style.css') as demo:
        outputs=result,
        api_name=False,
    )
-
+
+if __name__ == "__main__":
+    demo.queue(max_size=20).launch()
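Since the submit handler now exposes api_name="run", the deployed demo can also be called programmatically. The sketch below is illustrative only: the Space id is a placeholder, and the positional arguments assume the `inputs` list follows generate()'s signature (prompt, negative prompt, seed, width, height, the two guidance scales, and the two step counts).

```python
# Hedged sketch: call the Space's /run endpoint with gradio_client (pip install gradio_client).
from gradio_client import Client

client = Client("owner/kandinsky-2-1-demo")  # hypothetical Space id, replace with the real one
image_path = client.predict(
    "An astronaut riding a horse",  # prompt
    "low quality, bad quality",     # negative_prompt
    0,                              # seed
    768,                            # width
    768,                            # height
    1.0,                            # guidance_scale_prior
    4.0,                            # guidance_scale
    50,                             # num_inference_steps_prior
    100,                            # num_inference_steps
    api_name="/run",
)
print(image_path)  # local path to the generated image
```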
requirements.txt CHANGED

@@ -1,6 +1,6 @@
-accelerate==0.
-diffusers==0.
-gradio==3.
-torch==2.0.
-torchvision==0.15.
-transformers==4.
+accelerate==0.23.0
+diffusers==0.21.2
+gradio==3.44.4
+torch==2.0.0
+torchvision==0.15.1
+transformers==4.33.2
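All six dependencies are now fully pinned. As an optional sanity check (a sketch assuming it runs inside the Space's environment), the installed versions can be printed and compared against the pins above:

```python
# Hedged sketch: print installed package versions to compare against requirements.txt.
from importlib.metadata import version

for package in ["accelerate", "diffusers", "gradio", "torch", "torchvision", "transformers"]:
    print(f"{package}=={version(package)}")
```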