dreamdrop-art committed
Commit f467a89
1 parent: 29d2d55

Update app.py
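Adds a "HuggingFace Inference" tab that generates images through the hosted Inference API via new query()/hf_inference() helpers, reflows several long Gradio constructor calls, bumps the default txt2img CFG scale from 7 to 8, marks the output images as non-interactive, and removes the Prodia attribution column.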

Files changed (1)
  1. app.py +116 -45
app.py CHANGED
@@ -10,6 +10,32 @@ from PIL import Image
 import re
 
 
+def query(payload, model):
+    HF_TOKEN = os.getenv("HF_TOKEN")
+    headers = {"Authorization": f"Bearer {HF_TOKEN}"}
+    url = "https://api-inference.huggingface.co/models/"
+    API_URL = f"{url}{model}"
+    response = requests.post(API_URL, headers=headers, json=payload)
+    return response.content
+
+def hf_inference(prompt, negative, model, steps, guidance, width, height, seed):
+    image_bytes = query(payload={
+        "inputs": f"{prompt}",
+        "parameters": {
+            "negative_prompt": f"{negative}",
+            "num_inference_steps": steps,
+            "guidance_scale": guidance,
+            "width": width, "height": height,
+            "seed": seed,
+        },
+    }, model=model)
+    image = Image.open(BytesIO(image_bytes))
+    return image
+
+
+
+
+
 
 class Prodia:
     def __init__(self, api_key, base=None):
@@ -17,19 +43,19 @@ class Prodia:
         self.headers = {
             "X-Prodia-Key": api_key
         }
-
+
     def generate(self, params):
         response = self._post(f"{self.base}/sd/generate", params)
         return response.json()
-
+
     def transform(self, params):
         response = self._post(f"{self.base}/sd/transform", params)
         return response.json()
-
+
     def controlnet(self, params):
         response = self._post(f"{self.base}/sd/controlnet", params)
         return response.json()
-
+
     def get_job(self, job_id):
         response = self._get(f"{self.base}/job/{job_id}")
         return response.json()
@@ -76,7 +102,7 @@ def image_to_base64(image):
     # Convert the image to bytes
     buffered = BytesIO()
     image.save(buffered, format="PNG")  # You can change format to PNG if needed
-
+
     # Encode the bytes to base64
     img_str = base64.b64encode(buffered.getvalue())
 
@@ -100,11 +126,11 @@ def get_data(text):
         'negative_prompt': r'Negative prompt: (.*)',
         'steps': r'Steps: (\d+),',
         'seed': r'Seed: (\d+),',
-        'sampler': r'Sampler:\s*([^\s,]+(?:\s+[^\s,]+)*)',
+        'sampler': r'Sampler:\s*([^\s,]+(?:\s+[^\s,]+)*)',
         'model': r'Model:\s*([^\s,]+)',
         'cfg_scale': r'CFG scale:\s*([\d\.]+)',
         'size': r'Size:\s*([0-9]+x[0-9]+)'
-    }
+    }
     for key in ['prompt', 'negative_prompt', 'steps', 'seed', 'sampler', 'model', 'cfg_scale', 'size']:
         match = re.search(patterns[key], text)
         if match:
@@ -120,18 +146,20 @@ def get_data(text):
     results['h'] = None
     return results
 
+
 def send_to_img2img_def(image):
     return image
 
-def send_to_txt2img(image):
 
+def send_to_txt2img(image):
     result = {tabs: gr.update(selected="t2i")}
 
     try:
         text = image.info['parameters']
         data = get_data(text)
         result[prompt] = gr.update(value=data['prompt'])
-        result[negative_prompt] = gr.update(value=data['negative_prompt']) if data['negative_prompt'] is not None else gr.update()
+        result[negative_prompt] = gr.update(value=data['negative_prompt']) if data[
+            'negative_prompt'] is not None else gr.update()
         result[steps] = gr.update(value=int(data['steps'])) if data['steps'] is not None else gr.update()
         result[seed] = gr.update(value=int(data['seed'])) if data['seed'] is not None else gr.update()
         result[cfg_scale] = gr.update(value=float(data['cfg_scale'])) if data['cfg_scale'] is not None else gr.update()
@@ -206,59 +234,60 @@ css = """
 with gr.Blocks(css=css) as demo:
     with gr.Row():
         with gr.Column(scale=6):
-            model = gr.Dropdown(interactive=True,value="absolutereality_v181.safetensors [3d9d4d2b]", show_label=True, label="Stable Diffusion Checkpoint", choices=prodia_client.list_models())
-
-        with gr.Column(scale=1):
-            gr.Markdown(elem_id="powered-by-prodia", value="AUTOMATIC1111 Stable Diffusion Web UI.<br>Powered by [Prodia](https://prodia.com).<br>For more features and faster generation times check out our [API Docs](https://docs.prodia.com/reference/getting-started-guide).")
+            model = gr.Dropdown(interactive=True, value="absolutereality_v181.safetensors [3d9d4d2b]", show_label=True,
+                                label="Stable Diffusion Checkpoint", choices=prodia_client.list_models())
 
     with gr.Tabs() as tabs:
         with gr.Tab("txt2img", id='t2i'):
             with gr.Row():
                 with gr.Column(scale=6, min_width=600):
-                    prompt = gr.Textbox("space warrior, beautiful, female, ultrarealistic, soft lighting, 8k", placeholder="Prompt", show_label=False, lines=3)
-                    negative_prompt = gr.Textbox(placeholder="Negative Prompt", show_label=False, lines=3, value="3d, cartoon, anime, (deformed eyes, nose, ears, nose), bad anatomy, ugly")
+                    prompt = gr.Textbox("space warrior, beautiful, female, ultrarealistic, soft lighting, 8k",
+                                        placeholder="Prompt", show_label=False, lines=3)
+                    negative_prompt = gr.Textbox(placeholder="Negative Prompt", show_label=False, lines=3,
+                                                 value="3d, cartoon, anime, (deformed eyes, nose, ears, nose), bad anatomy, ugly")
                 with gr.Column():
                     text_button = gr.Button("Generate", variant='primary', elem_id="generate")
-
+
             with gr.Row():
                 with gr.Column(scale=3):
                     with gr.Tab("Generation"):
                         with gr.Row():
                             with gr.Column(scale=1):
-                                sampler = gr.Dropdown(value="DPM++ 2M Karras", show_label=True, label="Sampling Method", choices=prodia_client.list_samplers())
-
+                                sampler = gr.Dropdown(value="DPM++ 2M Karras", show_label=True, label="Sampling Method",
+                                                      choices=prodia_client.list_samplers())
+
                             with gr.Column(scale=1):
                                 steps = gr.Slider(label="Sampling Steps", minimum=1, maximum=25, value=20, step=1)
-
+
                         with gr.Row():
                             with gr.Column(scale=1):
                                 width = gr.Slider(label="Width", maximum=1024, value=512, step=8)
                                 height = gr.Slider(label="Height", maximum=1024, value=512, step=8)
-
+
                             with gr.Column(scale=1):
                                 batch_size = gr.Slider(label="Batch Size", maximum=1, value=1)
                                 batch_count = gr.Slider(label="Batch Count", maximum=1, value=1)
-
-                        cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, value=7, step=1)
+
+                        cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, value=8, step=1)
                         seed = gr.Number(label="Seed", value=-1)
 
                 with gr.Column(scale=2):
-                    image_output = gr.Image(show_label=False, type="filepath")
+                    image_output = gr.Image(show_label=False, type="filepath", interactive=False)
                     send_to_img2img = gr.Button(value="Send to img2img")
 
-
-
            text_button.click(txt2img, inputs=[prompt, negative_prompt, model, steps, sampler, cfg_scale, width, height,
                                               seed], outputs=image_output, concurrency_limit=64)
-
+
         with gr.Tab("img2img", id='i2i'):
             with gr.Row():
                 with gr.Column(scale=6, min_width=600):
-                    i2i_prompt = gr.Textbox("space warrior, beautiful, female, ultrarealistic, soft lighting, 8k", placeholder="Prompt", show_label=False, lines=3)
-                    i2i_negative_prompt = gr.Textbox(placeholder="Negative Prompt", show_label=False, lines=3, value="3d, cartoon, anime, (deformed eyes, nose, ears, nose), bad anatomy, ugly")
+                    i2i_prompt = gr.Textbox("space warrior, beautiful, female, ultrarealistic, soft lighting, 8k",
+                                            placeholder="Prompt", show_label=False, lines=3)
+                    i2i_negative_prompt = gr.Textbox(placeholder="Negative Prompt", show_label=False, lines=3,
+                                                     value="3d, cartoon, anime, (deformed eyes, nose, ears, nose), bad anatomy, ugly")
                 with gr.Column():
                     i2i_text_button = gr.Button("Generate", variant='primary', elem_id="generate")
-
+
             with gr.Row():
                 with gr.Column(scale=3):
                     with gr.Tab("Generation"):
@@ -266,8 +295,9 @@ with gr.Blocks(css=css) as demo:
 
                         with gr.Row():
                             with gr.Column(scale=1):
-                                i2i_sampler = gr.Dropdown(value="Euler a", show_label=True, label="Sampling Method", choices=prodia_client.list_samplers())
-
+                                i2i_sampler = gr.Dropdown(value="Euler a", show_label=True, label="Sampling Method",
+                                                          choices=prodia_client.list_samplers())
+
                             with gr.Column(scale=1):
                                 i2i_steps = gr.Slider(label="Sampling Steps", minimum=1, maximum=25, value=20, step=1)
 
@@ -275,32 +305,33 @@ with gr.Blocks(css=css) as demo:
                             with gr.Column(scale=1):
                                 i2i_width = gr.Slider(label="Width", maximum=1024, value=512, step=8)
                                 i2i_height = gr.Slider(label="Height", maximum=1024, value=512, step=8)
-
+
                             with gr.Column(scale=1):
                                 i2i_batch_size = gr.Slider(label="Batch Size", maximum=1, value=1)
                                 i2i_batch_count = gr.Slider(label="Batch Count", maximum=1, value=1)
-
+
                         i2i_cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, value=7, step=1)
                         i2i_denoising = gr.Slider(label="Denoising Strength", minimum=0, maximum=1, value=0.7, step=0.1)
                         i2i_seed = gr.Number(label="Seed", value=-1)
 
                 with gr.Column(scale=2):
-                    i2i_image_output = gr.Image(show_label=False, type="filepath")
-
+                    i2i_image_output = gr.Image(show_label=False, type="filepath", interactive=False)
+
            i2i_text_button.click(img2img, inputs=[i2i_image_input, i2i_denoising, i2i_prompt, i2i_negative_prompt,
                                                   model, i2i_steps, i2i_sampler, i2i_cfg_scale, i2i_width, i2i_height,
                                                   i2i_seed], outputs=i2i_image_output, concurrency_limit=64)
            send_to_img2img.click(send_to_img2img_def, inputs=image_output, outputs=i2i_image_input)
+
         with gr.Tab("PNG Info"):
             def plaintext_to_html(text, classname=None):
                 content = "<br>\n".join(html.escape(x) for x in text.split('\n'))
+
                 return f"<p class='{classname}'>{content}</p>" if classname else f"<p>{content}</p>"
+
+
             def get_exif_data(image):
                 items = image.info
+
                 info = ''
                 for key, text in items.items():
                     info += f"""
@@ -308,26 +339,66 @@ with gr.Blocks(css=css) as demo:
                     <p><b>{plaintext_to_html(str(key))}</b></p>
                     <p>{plaintext_to_html(str(text))}</p>
                     </div>
-                    """.strip()+"\n"
-
+                    """.strip() + "\n"
+
                 if len(info) == 0:
                     message = "Nothing found in the image."
                     info = f"<div><p>{message}<p></div>"
-
+
                 return info
-
+
+
             with gr.Row():
                 with gr.Column():
                     image_input = gr.Image(type="pil")
-
+
                 with gr.Column():
                     exif_output = gr.HTML(label="EXIF Data")
                     send_to_txt2img_btn = gr.Button("Send to txt2img")
-
+
             image_input.upload(get_exif_data, inputs=[image_input], outputs=exif_output)
             send_to_txt2img_btn.click(send_to_txt2img, inputs=[image_input], outputs=[tabs, prompt, negative_prompt,
                                                                                       steps, seed, model, sampler,
                                                                                       width, height, cfg_scale],
                                       concurrency_limit=64)
+        with gr.Tab("HuggingFace Inference"):
+            with gr.Row():
+                hf_model = gr.Dropdown(label="HuggingFace checkpoint", choices=["runwayml/stable-diffusion-v1-5", "stabilityai/stable-diffusion-2-1", "dataautogpt3/OpenDalleV1.1", "CompVis/stable-diffusion-v1-4", "playgroundai/playground-v2-1024px-aesthetic", "prompthero/openjourney", "openskyml/dreamdrop-v1", "SG161222/Realistic_Vision_V1.4", "digiplay/AbsoluteReality_v1.8.1", "openskyml/dalle-3-xl", "Lykon/dreamshaper-7", "Pclanglais/Mickey-1928"], value="runwayml/stable-diffusion-v1-5", allow_custom_value=False, interactive=True)
+            with gr.Row():
+                with gr.Column(scale=6, min_width=600):
+                    hf_prompt = gr.Textbox("space warrior, beautiful, female, ultrarealistic, soft lighting, 8k",
+                                           placeholder="Prompt", show_label=False, lines=3)
+                    hf_negative_prompt = gr.Textbox(placeholder="Negative Prompt", show_label=False, lines=3,
+                                                    value="3d, cartoon, anime, (deformed eyes, nose, ears, nose), bad anatomy, ugly")
+                with gr.Column():
+                    hf_text_button = gr.Button("Generate with HF", variant='primary', elem_id="generate")
+
+            with gr.Row():
+                with gr.Column(scale=3):
+                    with gr.Tab("Generation"):
+                        with gr.Row():
+
+                            with gr.Column(scale=1):
+                                hf_steps = gr.Slider(label="Sampling Steps", minimum=1, maximum=25, value=20, step=1)
+
+                        with gr.Row():
+                            with gr.Column(scale=1):
+                                hf_width = gr.Slider(label="Width", maximum=1024, value=512, step=8)
+                                hf_height = gr.Slider(label="Height", maximum=1024, value=512, step=8)
+
+                            with gr.Column(scale=1):
+                                hf_batch_size = gr.Slider(label="Batch Size", maximum=1, value=1)
+                                hf_batch_count = gr.Slider(label="Batch Count", maximum=1, value=1)
+
+                        hf_cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, value=8, step=1)
+                        hf_seed = gr.Number(label="Seed", value=-1)
+
+                with gr.Column(scale=2):
+                    hf_image_output = gr.Image(show_label=False, type="filepath", interactive=False)
+                    #hf_send_to_img2img = gr.Button(value="Send to img2img")
+
+            hf_text_button.click(hf_inference, inputs=[hf_prompt, hf_negative_prompt, hf_model, hf_steps, hf_cfg_scale, hf_width, hf_height,
+                                                       hf_seed], outputs=hf_image_output, concurrency_limit=64)
+
 
 demo.queue(max_size=80, api_open=False).launch(max_threads=256, show_api=False)
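For reference, the request issued by the new query()/hf_inference() helpers can be exercised on its own. A minimal sketch, assuming a valid HF_TOKEN environment variable and a model that is deployed on the hosted Inference API (support for parameters such as width, height and seed varies by pipeline):

import os
from io import BytesIO

import requests
from PIL import Image

# Same endpoint pattern as query() in app.py: base URL + model id.
API_URL = "https://api-inference.huggingface.co/models/runwayml/stable-diffusion-v1-5"
headers = {"Authorization": f"Bearer {os.getenv('HF_TOKEN')}"}

payload = {
    "inputs": "space warrior, beautiful, female, ultrarealistic, soft lighting, 8k",
    "parameters": {
        "negative_prompt": "3d, cartoon, anime, bad anatomy, ugly",
        "num_inference_steps": 20,
        "guidance_scale": 8,
    },
}

# On success the endpoint returns raw image bytes; errors come back as JSON.
response = requests.post(API_URL, headers=headers, json=payload)
response.raise_for_status()

Image.open(BytesIO(response.content)).save("hf_inference_test.png")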