prithivMLmods committed
Commit e141ac9
1 Parent(s): 3022558

Update app.py

Files changed (1)
  1. app.py +220 -0
app.py CHANGED
@@ -0,0 +1,220 @@
import os
import random
import uuid
import json
import gradio as gr
import numpy as np
from PIL import Image
import spaces
import torch
from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler

# Load the HTML content (earlier iframe URLs kept commented out for reference)
# html_file_url = "https://prithivmlmods-hamster-static.static.hf.space/index.html"
# html_content = f'<iframe src="{html_file_url}" style="width:100%; height:180px; border:none;"></iframe>'
# html_file_url = "https://prithivmlmods-static-loading-theme.static.hf.space/index.html"

html_file_url = "https://prithivhamster.vercel.app/"
html_content = f'<iframe src="{html_file_url}" style="width:100%; height:400px; border:none"></iframe>'

DESCRIPTIONx = """## REALVISXL V5⚡

"""

css = '''
.gradio-container{max-width: 560px !important}
h1{text-align:center}
footer {
    visibility: hidden
}
'''

# Example prompts shown below the prompt box
examples = [
    "3d image, cute girl, in the style of Pixar --ar 1:2 --stylize 750, 4K resolution highlights, Sharp focus, octane render, ray tracing, Ultra-High-Definition, 8k, UHD, HDR, (Masterpiece:1.5), (best quality:1.5)",
    "Cold coffee in a cup bokeh --ar 85:128 --v 6.0 --style raw5, 4K",
]

# Configuration, overridable via environment variables
MODEL_ID = os.getenv("MODEL_VAL_PATH", "SG161222/RealVisXL_V4.0_Lightning")
MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "4096"))
USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"
ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
BATCH_SIZE = int(os.getenv("BATCH_SIZE", "1"))  # Allow generating multiple images at once

# Load the SDXL pipeline on GPU if available, otherwise on CPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
pipe = StableDiffusionXLPipeline.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    use_safetensors=True,
    add_watermarker=False,
).to(device)
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)

# Optional torch.compile speedup for the UNet
if USE_TORCH_COMPILE:
    pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)

if ENABLE_CPU_OFFLOAD:
    pipe.enable_model_cpu_offload()

MAX_SEED = np.iinfo(np.int32).max

def save_image(img):
    # Save a PIL image under a unique filename and return the path
    unique_name = str(uuid.uuid4()) + ".png"
    img.save(unique_name)
    return unique_name

def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return seed

@spaces.GPU(duration=60, enable_queue=True)
def generate(
    prompt: str,
    negative_prompt: str = "",
    use_negative_prompt: bool = False,
    seed: int = 1,
    width: int = 1024,
    height: int = 1024,
    guidance_scale: float = 3,
    num_inference_steps: int = 25,
    randomize_seed: bool = False,
    use_resolution_binning: bool = True,
    num_images: int = 1,  # Number of images to generate
    progress=gr.Progress(track_tqdm=True),
):
    seed = int(randomize_seed_fn(seed, randomize_seed))
    generator = torch.Generator(device=device).manual_seed(seed)

    options = {
        "prompt": [prompt] * num_images,
        "width": width,
        "height": height,
        "guidance_scale": guidance_scale,
        "num_inference_steps": num_inference_steps,
        "generator": generator,
        "output_type": "pil",
    }
    # Only pass a negative prompt when it is enabled, so the
    # per-batch slicing below never hits a None value
    if use_negative_prompt:
        options["negative_prompt"] = [negative_prompt] * num_images

    if use_resolution_binning:
        options["use_resolution_binning"] = True

    # Generate in chunks of BATCH_SIZE to bound peak memory usage
    images = []
    for i in range(0, num_images, BATCH_SIZE):
        batch_options = options.copy()
        batch_options["prompt"] = options["prompt"][i:i + BATCH_SIZE]
        if "negative_prompt" in batch_options:
            batch_options["negative_prompt"] = options["negative_prompt"][i:i + BATCH_SIZE]
        images.extend(pipe(**batch_options).images)

    image_paths = [save_image(img) for img in images]
    return image_paths, seed

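# Illustrative sketch (not part of the UI wiring below): generate() can also be
# called directly; the argument values here are arbitrary examples, not defaults
# taken from this Space.
#
#     paths, used_seed = generate(
#         prompt="Cold coffee in a cup bokeh, 4K",
#         use_negative_prompt=False,
#         randomize_seed=True,
#         num_images=1,
#     )
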
with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
    gr.Markdown(DESCRIPTIONx)
    with gr.Group():
        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run", scale=0)
        result = gr.Gallery(label="Result", columns=1, show_label=False)
    with gr.Accordion("Advanced options", open=False, visible=True):
        num_images = gr.Slider(
            label="Number of Images",
            minimum=1,
            maximum=4,
            step=1,
            value=1,
        )
        with gr.Row():
            use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True)
            negative_prompt = gr.Text(
                label="Negative prompt",
                max_lines=5,
                lines=4,
                placeholder="Enter a negative prompt",
                value="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
                visible=True,
            )
        seed = gr.Slider(
            label="Seed",
            minimum=0,
            maximum=MAX_SEED,
            step=1,
            value=0,
        )
        randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
        with gr.Row(visible=True):
            width = gr.Slider(
                label="Width",
                minimum=512,
                maximum=MAX_IMAGE_SIZE,
                step=64,
                value=1024,
            )
            height = gr.Slider(
                label="Height",
                minimum=512,
                maximum=MAX_IMAGE_SIZE,
                step=64,
                value=1024,
            )
        with gr.Row():
            guidance_scale = gr.Slider(
                label="Guidance Scale",
                minimum=0.1,
                maximum=6,
                step=0.1,
                value=3.0,
            )
            num_inference_steps = gr.Slider(
                label="Number of inference steps",
                minimum=1,
                maximum=25,
                step=1,
                value=23,
            )

    gr.Examples(
        examples=examples,
        inputs=prompt,
        cache_examples=False,
    )

    # Show or hide the negative prompt box with the checkbox
    use_negative_prompt.change(
        fn=lambda x: gr.update(visible=x),
        inputs=use_negative_prompt,
        outputs=negative_prompt,
        api_name=False,
    )
    # Run generation on button click or when either textbox is submitted
    gr.on(
        triggers=[
            prompt.submit,
            negative_prompt.submit,
            run_button.click,
        ],
        fn=generate,
        inputs=[
            prompt,
            negative_prompt,
            use_negative_prompt,
            seed,
            width,
            height,
            guidance_scale,
            num_inference_steps,
            randomize_seed,
            num_images,
        ],
        outputs=[result, seed],
        api_name="run",
    )

if __name__ == "__main__":
    demo.queue(max_size=40).launch()
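
A minimal configuration sketch, assuming the environment variables read at the top of app.py are set in the same process before its module-level code runs (for example, in a small wrapper script; on Hugging Face Spaces they would normally be set as Space variables instead). The file name run_local.py and the values below are illustrative assumptions, not defaults of this Space.

# run_local.py (hypothetical wrapper): configure the knobs, then execute app.py
import os
import runpy

os.environ["MODEL_VAL_PATH"] = "SG161222/RealVisXL_V4.0_Lightning"  # model repo to load
os.environ["MAX_IMAGE_SIZE"] = "2048"    # upper bound for the width/height sliders
os.environ["USE_TORCH_COMPILE"] = "0"    # "1" enables the torch.compile path
os.environ["ENABLE_CPU_OFFLOAD"] = "1"   # offload model weights to CPU between calls
os.environ["BATCH_SIZE"] = "2"           # images generated per pipeline call

runpy.run_path("app.py", run_name="__main__")  # launches the Gradio demo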