prithivMLmods committed on
Commit
805f947
1 Parent(s): 0332354

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -59
app.py CHANGED
@@ -1,8 +1,6 @@
1
  import gradio as gr
2
  import numpy as np
3
  import random
4
- import uuid
5
- from PIL import Image
6
 
7
  import spaces
8
  from diffusers import DiffusionPipeline
@@ -49,16 +47,7 @@ style_list = [
49
  STYLE_NAMES = [style["name"] for style in style_list]
50
  DEFAULT_STYLE_NAME = STYLE_NAMES[0]
51
 
52
- grid_sizes = {
53
- "2x1": (2, 1),
54
- "1x2": (1, 2),
55
- "2x2": (2, 2),
56
- "2x3": (2, 3),
57
- "3x2": (3, 2),
58
- "1x1": (1, 1)
59
- }
60
-
61
- @spaces.GPU(duration=100)
62
  def infer(
63
  prompt,
64
  negative_prompt="",
@@ -69,10 +58,9 @@ def infer(
69
  guidance_scale=0.0,
70
  num_inference_steps=4,
71
  style="Style Zero",
72
- grid_size="1x1",
73
  progress=gr.Progress(track_tqdm=True),
74
  ):
75
-
76
  selected_style = next(s for s in style_list if s["name"] == style)
77
  styled_prompt = selected_style["prompt"].format(prompt=prompt)
78
  styled_negative_prompt = selected_style["negative_prompt"]
@@ -82,33 +70,17 @@ def infer(
82
 
83
  generator = torch.Generator().manual_seed(seed)
84
 
85
- grid_size_x, grid_size_y = grid_sizes.get(grid_size, (2, 2))
86
- num_images = grid_size_x * grid_size_y
87
-
88
- images = []
89
- for _ in range(num_images):
90
- image = pipe(
91
- prompt=styled_prompt,
92
- negative_prompt=styled_negative_prompt,
93
- guidance_scale=guidance_scale,
94
- num_inference_steps=num_inference_steps,
95
- width=width,
96
- height=height,
97
- generator=generator,
98
- ).images[0]
99
- images.append(image)
100
-
101
- # Create a grid image
102
- grid_img = Image.new('RGB', (width * grid_size_x, height * grid_size_y))
103
-
104
- for i, img in enumerate(images[:num_images]):
105
- grid_img.paste(img, (i % grid_size_x * width, i // grid_size_x * height))
106
-
107
- # Save the grid image
108
- unique_name = str(uuid.uuid4()) + ".png"
109
- grid_img.save(unique_name)
110
-
111
- return unique_name, seed
112
 
113
  examples = [
114
  "Chocolate dripping from a donut against a yellow background, in the style of brocore, hyper-realistic oil --ar 2:3 --q 2 --s 750 --v 5 --ar 2:3 --q 2 --s 750 --v 5",
@@ -137,11 +109,11 @@ with gr.Blocks(css=css, theme="prithivMLmods/Minecraft-Theme") as demo:
137
  placeholder="Enter your prompt",
138
  container=False,
139
  )
140
-
141
  run_button = gr.Button("Run", scale=0, variant="primary")
142
-
143
- result = gr.Image(label="Result", show_label=False)
144
 
 
 
145
  with gr.Row(visible=True):
146
  style_selection = gr.Radio(
147
  show_label=True,
@@ -151,15 +123,8 @@ with gr.Blocks(css=css, theme="prithivMLmods/Minecraft-Theme") as demo:
151
  value=DEFAULT_STYLE_NAME,
152
  label="Quality Style",
153
  )
154
-
155
- with gr.Row(visible=False):
156
- grid_size_selection = gr.Dropdown(
157
- choices=["2x1", "1x2", "2x2", "2x3", "3x2", "1x1"],
158
- value="1x1",
159
- label="Grid Size"
160
- )
161
 
162
- with gr.Accordion("Advanced Settings", open=False, visibile=False):
163
  negative_prompt = gr.Text(
164
  label="Negative prompt",
165
  max_lines=1,
@@ -211,11 +176,7 @@ with gr.Blocks(css=css, theme="prithivMLmods/Minecraft-Theme") as demo:
211
  value=4,
212
  )
213
 
214
- gr.Examples(examples=examples,
215
- inputs=[prompt],
216
- outputs=[result, seed],
217
- fn=infer,
218
- cache_examples=True)
219
 
220
  gr.on(
221
  triggers=[run_button.click, prompt.submit],
@@ -230,10 +191,10 @@ with gr.Blocks(css=css, theme="prithivMLmods/Minecraft-Theme") as demo:
230
  guidance_scale,
231
  num_inference_steps,
232
  style_selection,
233
- grid_size_selection,
234
  ],
235
  outputs=[result, seed],
236
  )
237
 
238
  if __name__ == "__main__":
239
- demo.launch()
 
 
1
  import gradio as gr
2
  import numpy as np
3
  import random
 
 
4
 
5
  import spaces
6
  from diffusers import DiffusionPipeline
 
47
  STYLE_NAMES = [style["name"] for style in style_list]
48
  DEFAULT_STYLE_NAME = STYLE_NAMES[0]
49
 
50
+ @spaces.GPU
 
 
 
 
 
 
 
 
 
51
  def infer(
52
  prompt,
53
  negative_prompt="",
 
58
  guidance_scale=0.0,
59
  num_inference_steps=4,
60
  style="Style Zero",
 
61
  progress=gr.Progress(track_tqdm=True),
62
  ):
63
+ # Apply selected style
64
  selected_style = next(s for s in style_list if s["name"] == style)
65
  styled_prompt = selected_style["prompt"].format(prompt=prompt)
66
  styled_negative_prompt = selected_style["negative_prompt"]
 
70
 
71
  generator = torch.Generator().manual_seed(seed)
72
 
73
+ image = pipe(
74
+ prompt=styled_prompt,
75
+ negative_prompt=styled_negative_prompt,
76
+ guidance_scale=guidance_scale,
77
+ num_inference_steps=num_inference_steps,
78
+ width=width,
79
+ height=height,
80
+ generator=generator,
81
+ ).images[0]
82
+
83
+ return image, seed
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
84
 
85
  examples = [
86
  "Chocolate dripping from a donut against a yellow background, in the style of brocore, hyper-realistic oil --ar 2:3 --q 2 --s 750 --v 5 --ar 2:3 --q 2 --s 750 --v 5",
 
109
  placeholder="Enter your prompt",
110
  container=False,
111
  )
112
+
113
  run_button = gr.Button("Run", scale=0, variant="primary")
 
 
114
 
115
+ result = gr.Image(label="Result", show_label=False)
116
+
117
  with gr.Row(visible=True):
118
  style_selection = gr.Radio(
119
  show_label=True,
 
123
  value=DEFAULT_STYLE_NAME,
124
  label="Quality Style",
125
  )
 
 
 
 
 
 
 
126
 
127
+ with gr.Accordion("Advanced Settings", open=False, visible=False):
128
  negative_prompt = gr.Text(
129
  label="Negative prompt",
130
  max_lines=1,
 
176
  value=4,
177
  )
178
 
179
+ gr.Examples(examples=examples, inputs=[prompt], outputs=[result, seed], fn=infer, cache_examples=True)
 
 
 
 
180
 
181
  gr.on(
182
  triggers=[run_button.click, prompt.submit],
 
191
  guidance_scale,
192
  num_inference_steps,
193
  style_selection,
 
194
  ],
195
  outputs=[result, seed],
196
  )
197
 
198
  if __name__ == "__main__":
199
+ demo.launch()
200
+