CreitinGameplays committed on
Commit
5e8d978
1 Parent(s): ef9beca

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +228 -109
app.py CHANGED
@@ -1,70 +1,147 @@
 
 
 
 
1
  import gradio as gr
2
  import numpy as np
3
- import random
4
- from diffusers import DiffusionPipeline
 
5
  import torch
 
6
 
7
- device = "cuda" if torch.cuda.is_available() else "cpu"
8
 
9
- if torch.cuda.is_available():
10
- torch.cuda.max_memory_allocated(device=device)
11
- pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
12
- pipe.enable_xformers_memory_efficient_attention()
13
- pipe = pipe.to(device)
14
- else:
15
- pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
16
- pipe = pipe.to(device)
17
 
18
- MAX_SEED = np.iinfo(np.int32).max
19
- MAX_IMAGE_SIZE = 1024
20
 
21
- def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
 
 
 
22
 
 
23
  if randomize_seed:
24
  seed = random.randint(0, MAX_SEED)
25
-
26
- generator = torch.Generator().manual_seed(seed)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
 
28
- image = pipe(
29
- prompt = prompt,
30
- negative_prompt = negative_prompt,
31
- guidance_scale = guidance_scale,
32
- num_inference_steps = num_inference_steps,
33
- width = width,
34
- height = height,
35
- generator = generator
36
- ).images[0]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
37
 
38
- return image
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
39
 
40
  examples = [
41
- "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
42
- "An astronaut riding a green horse",
43
- "A delicious ceviche cheesecake slice",
 
 
 
 
 
 
44
  ]
45
 
46
- css="""
47
- #col-container {
48
- margin: 0 auto;
49
- max-width: 520px;
 
50
  }
51
- """
52
 
53
- if torch.cuda.is_available():
54
- power_device = "GPU"
55
- else:
56
- power_device = "CPU"
 
 
 
57
 
58
- with gr.Blocks(css=css) as demo:
59
-
60
- with gr.Column(elem_id="col-container"):
61
- gr.Markdown(f"""
62
- # Text-to-Image Gradio Template
63
- Currently running on {power_device}.
64
- """)
65
-
66
  with gr.Row():
67
-
68
  prompt = gr.Text(
69
  label="Prompt",
70
  show_label=False,
@@ -72,75 +149,117 @@ with gr.Blocks(css=css) as demo:
72
  placeholder="Enter your prompt",
73
  container=False,
74
  )
75
-
76
- run_button = gr.Button("Run", scale=0)
77
-
78
- result = gr.Image(label="Result", show_label=False)
79
-
80
- with gr.Accordion("Advanced Settings", open=False):
81
-
82
- negative_prompt = gr.Text(
83
  label="Negative prompt",
84
  max_lines=1,
85
  placeholder="Enter a negative prompt",
86
- visible=False,
87
  )
88
-
89
- seed = gr.Slider(
90
- label="Seed",
91
- minimum=0,
92
- maximum=MAX_SEED,
93
  step=1,
94
- value=0,
95
  )
96
-
97
- randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
98
-
99
- with gr.Row():
100
-
101
- width = gr.Slider(
102
- label="Width",
103
- minimum=256,
104
- maximum=MAX_IMAGE_SIZE,
105
- step=32,
106
- value=512,
107
- )
108
-
109
- height = gr.Slider(
110
- label="Height",
111
- minimum=256,
112
- maximum=MAX_IMAGE_SIZE,
113
- step=32,
114
- value=512,
115
- )
116
-
117
- with gr.Row():
118
-
119
- guidance_scale = gr.Slider(
120
- label="Guidance scale",
121
- minimum=0.0,
122
- maximum=10.0,
123
- step=0.1,
124
- value=0.0,
125
- )
126
-
127
- num_inference_steps = gr.Slider(
128
- label="Number of inference steps",
129
- minimum=1,
130
- maximum=12,
131
- step=1,
132
- value=2,
133
- )
134
-
135
- gr.Examples(
136
- examples = examples,
137
- inputs = [prompt]
138
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
139
 
140
- run_button.click(
141
- fn = infer,
142
- inputs = [prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
143
- outputs = [result]
 
144
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
145
 
146
- demo.queue().launch()
 
 
1
+ #!/usr/bin/env python
2
+ import os
3
+ import random
4
+ import uuid
5
  import gradio as gr
6
  import numpy as np
7
+ from PIL import Image
8
+ import spaces
9
+ from typing import Tuple
10
  import torch
11
+ from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
12
 
 
13
 
14
# Markdown banner rendered at the top of the Gradio UI; a CPU warning is
# appended further down when no CUDA device is available.
DESCRIPTION = """# InterDiffusion-4.0
### [https://huggingface.co/cutycat2000x/InterDiffusion-4.0](https://huggingface.co/cutycat2000x/InterDiffusion-4.0)"""
 
 
 
 
 
 
16
 
 
 
17
 
18
def save_image(img):
    """Write *img* to the working directory under a random PNG filename.

    Returns the generated filename so callers (the result gallery) can
    reference the saved file by path.
    """
    filename = "{}.png".format(uuid.uuid4())
    img.save(filename)
    return filename
22
 
23
def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    """Return *seed* unchanged, or draw a fresh seed in [0, MAX_SEED].

    MAX_SEED is the module-level int32 upper bound defined below.
    """
    if not randomize_seed:
        return seed
    return random.randint(0, MAX_SEED)
27
+
28
+
29
+
30
# Largest value accepted by the seed slider / RNG: int32 max.
# (Fix: this was previously assigned twice; the duplicate is removed.)
MAX_SEED = np.iinfo(np.int32).max

# Warn visitors when no CUDA device is present: the pipeline below is only
# instantiated on GPU, so CPU runs are effectively unsupported.
if not torch.cuda.is_available():
    DESCRIPTION += "\n<p>Running on CPU, This may not work on CPU.</p>"

# Feature flags (currently disabled; kept as ints to match the template).
USE_TORCH_COMPILE = 0
ENABLE_CPU_OFFLOAD = 0
39
+
40
+
41
+
42
+
43
# GPU-only model setup. NOTE(review): on CPU hosts `pipe` is never created,
# so generate() would fail with a NameError — the CPU warning appended to
# DESCRIPTION is the only guard.
if torch.cuda.is_available():
    # Load the SDXL-based checkpoint in fp16 to halve VRAM use.
    pipe = StableDiffusionXLPipeline.from_pretrained(
        "cutycat2000x/InterDiffusion-4.0",
        torch_dtype=torch.float16,
        use_safetensors=True,
    )
    # Swap the default scheduler for Euler Ancestral, reusing its config.
    pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
    # Attach the LoRA weights and make them the active adapter.
    pipe.load_lora_weights("cutycat2000x/LoRA2", weight_name="lora.safetensors", adapter_name="adapt")
    pipe.set_adapters("adapt")
    pipe.to("cuda")
53
+
54
+
55
+
56
+
57
+
58
# Prompt-style presets: each entry maps a display name to a pair of
# (prompt template, base negative prompt). Only the LoRA passthrough style
# is currently defined.
style_list = [
    {
        "name": "(LoRA)",
        "prompt": "{prompt}",
        "negative_prompt": "",
    },
]
styles = {}
for entry in style_list:
    styles[entry["name"]] = (entry["prompt"], entry["negative_prompt"])
STYLE_NAMES = [name for name in styles]
DEFAULT_STYLE_NAME = "(LoRA)"


def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str, str]:
    """Expand the chosen style template with the user's prompts.

    Unknown style names fall back to DEFAULT_STYLE_NAME. Returns the
    (positive, negative) prompt pair that is fed to the pipeline.
    """
    template, base_negative = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
    negative = negative if negative else ""
    return template.replace("{prompt}", positive), base_negative + negative
76
+
77
@spaces.GPU(enable_queue=True)
def generate(
    prompt: str,
    negative_prompt: str = "",
    style: str = DEFAULT_STYLE_NAME,
    use_negative_prompt: bool = False,
    num_inference_steps: int = 30,
    num_images_per_prompt: int = 2,
    seed: int = 0,
    width: int = 1024,
    height: int = 1024,
    guidance_scale: float = 3,
    randomize_seed: bool = False,
    progress=gr.Progress(track_tqdm=True),
):
    """Run the diffusion pipeline and return (saved image paths, seed used).

    Wired to the Run button / prompt submit in the UI below; `progress`
    lets Gradio track the pipeline's tqdm bars. NOTE(review): `pipe` is
    only defined when CUDA is available — on CPU this raises NameError.
    """
    # Resolve the effective seed first so it can be reported back to the UI.
    seed = int(randomize_seed_fn(seed, randomize_seed))

    # The negative-prompt textbox is gated by a checkbox; discard its text
    # when the checkbox is off.
    if not use_negative_prompt:
        negative_prompt = ""  # type: ignore
    prompt, negative_prompt = apply_style(style, prompt, negative_prompt)

    images = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        width=width,
        height=height,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        num_images_per_prompt=num_images_per_prompt,
        # presumably 0.65 modulates the LoRA strength — confirm against
        # the diffusers cross_attention_kwargs / lora scale docs
        cross_attention_kwargs={"scale": 0.65},
        output_type="pil",
    ).images
    # Persist each PIL image to disk; the gallery displays files by path.
    image_paths = [save_image(img) for img in images]
    print(image_paths)
    return image_paths, seed
114
 
115
# Example prompts surfaced in the UI via gr.Examples below. These are
# runtime strings shown verbatim to users (spelling left as authored).
examples = [
    'a smiling girl with sparkles in her eyes, walking in a garden, in the morning --style anime',
    'firewatch landscape, Graphic Novel, Pastel Art, Poster, Golden Hour, Electric Colors, 4k, RGB, Geometric, Volumetric, Lumen Global Illumination, Ray Tracing Reflections, Twisted Rays, Glowing Edges, RTX --raw',
    'Samsung Galaxy S9',
    'cat, 4k, 8k, hyperrealistic, realistic, High-resolution, unreal engine 5, rtx, 16k, taken on a sony camera, Cinematic, dramatic lighting',
    'cinimatic closeup of burning skull',
    'frozen elsa',
    'A rainbow tree, anime style, tree in focus',
    'A cat holding a sign that reads "Hello World" in cursive text',
    'A birthday card for "Meow"'
]
126
 
127
# Minimal page styling: narrow the app column, center the title, and hide
# the default Gradio footer. (Whitespace inside the literal is cosmetic.)
css = '''
.gradio-container{max-width: 560px !important}
h1{text-align:center}
footer {
visibility: hidden
}
'''
134
 
135
# UI definition. NOTE(review): the source view elides one interior line of
# the prompt textbox and indentation was lost; the widget nesting below is
# reconstructed — confirm the Accordion sits at Blocks level, not inside
# the Group.
with gr.Blocks(css=css, theme="xiaobaiyuan/theme_brief") as demo:
    gr.Markdown(DESCRIPTION)
    # Kept in the layout but hidden from visitors.
    gr.DuplicateButton(
        value="Duplicate Space for private use",
        elem_id="duplicate-button",
        visible=False,
    )

    with gr.Group():
        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run")
        # Gallery (not a single Image) because generate() returns a list
        # of saved file paths.
        result = gr.Gallery(label="Result", columns=1, preview=True)
    with gr.Accordion("Advanced options", open=False):
        # Checkbox gates the negative-prompt textbox; its .change handler
        # below toggles the textbox's visibility.
        use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=False, visible=True)
        negative_prompt = gr.Text(
            label="Negative prompt",
            max_lines=1,
            placeholder="Enter a negative prompt",
            visible=True,
        )
        with gr.Row():
            num_inference_steps = gr.Slider(
                label="Steps",
                minimum=10,
                maximum=60,
                step=1,
                value=30,
            )
        with gr.Row():
            num_images_per_prompt = gr.Slider(
                label="Images",
                minimum=1,
                maximum=5,
                step=1,
                value=2,
            )
        seed = gr.Slider(
            label="Seed",
            minimum=0,
            maximum=MAX_SEED,
            step=1,
            value=0,
            visible=True
        )
        randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
        with gr.Row(visible=True):
            width = gr.Slider(
                label="Width",
                minimum=512,
                maximum=2048,
                step=8,
                value=1024,
            )
            height = gr.Slider(
                label="Height",
                minimum=512,
                maximum=2048,
                step=8,
                value=1024,
            )
        with gr.Row():
            guidance_scale = gr.Slider(
                label="Guidance Scale",
                minimum=0.1,
                maximum=20.0,
                step=0.1,
                value=6,
            )
        with gr.Row(visible=True):
            style_selection = gr.Radio(
                show_label=True,
                container=True,
                interactive=True,
                choices=STYLE_NAMES,
                value=DEFAULT_STYLE_NAME,
                label="Image Style",
            )

    # Clicking an example only fills the prompt; outputs/fn are declared
    # but cache_examples=False means nothing is precomputed.
    gr.Examples(
        examples=examples,
        inputs=prompt,
        outputs=[result, seed],
        fn=generate,
        cache_examples=False,
    )

    # Show/hide the negative-prompt textbox as the checkbox toggles.
    use_negative_prompt.change(
        fn=lambda x: gr.update(visible=x),
        inputs=use_negative_prompt,
        outputs=negative_prompt,
        api_name=False,
    )

    # One handler for all three triggers: prompt submit, negative-prompt
    # submit, and the Run button. Input order must match generate()'s
    # positional parameters.
    gr.on(
        triggers=[
            prompt.submit,
            negative_prompt.submit,
            run_button.click,
        ],
        fn=generate,
        inputs=[
            prompt,
            negative_prompt,
            style_selection,
            use_negative_prompt,
            num_inference_steps,
            num_images_per_prompt,
            seed,
            width,
            height,
            guidance_scale,
            randomize_seed,
        ],
        outputs=[result, seed],
        api_name="run",
    )
261
+
262
+
263
 
264
# Launch with a bounded request queue; the public REST API view is disabled.
if __name__ == "__main__":
    demo.queue(max_size=20).launch(show_api=False, debug=False)