Yntec committed on
Commit
4227cb0
β€’
1 Parent(s): bc7f90d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -33
app.py CHANGED
@@ -1,11 +1,8 @@
1
  import gradio as gr
2
- import os
3
- import sys
4
- from pathlib import Path
5
  from all_models import models
6
- from externalmod import gr_Interface_load
7
  from prompt_extend import extend_prompt
8
- from random import randint
9
  import asyncio
10
  from threading import RLock
11
  lock = RLock()
@@ -32,53 +29,47 @@ def send_it1(inputs, model_choice, neg_input, height, width, steps, cfg, seed):
32
 
33
  # https://huggingface.co/docs/api-inference/detailed_parameters
34
  # https://huggingface.co/docs/huggingface_hub/package_reference/inference_client
35
- async def infer(model_index, prompt, nprompt="", height=None, width=None, steps=None, cfg=None, seed=-1, timeout=inference_timeout):
36
- from pathlib import Path
37
  kwargs = {}
38
- if height is not None and height >= 256: kwargs["height"] = height
39
- if width is not None and width >= 256: kwargs["width"] = width
40
- if steps is not None and steps >= 1: kwargs["num_inference_steps"] = steps
41
- if cfg is not None and cfg > 0: cfg = kwargs["guidance_scale"] = cfg
42
- noise = ""
43
- if seed >= 0: kwargs["seed"] = seed
44
- else:
45
- rand = randint(1, 500)
46
- for i in range(rand):
47
- noise += " "
48
  task = asyncio.create_task(asyncio.to_thread(models2[model_index].fn,
49
- prompt=f'{prompt} {noise}', negative_prompt=nprompt, **kwargs, token=HF_TOKEN))
50
  await asyncio.sleep(0)
51
  try:
52
  result = await asyncio.wait_for(task, timeout=timeout)
53
  except asyncio.TimeoutError as e:
54
  print(e)
55
- print(f"Task timed out: {models2[model_index]}")
56
  if not task.done(): task.cancel()
57
  result = None
58
- raise Exception(f"Task timed out: {models2[model_index]}")
59
  except Exception as e:
60
  print(e)
61
  if not task.done(): task.cancel()
62
  result = None
63
- raise Exception(e)
64
  if task.done() and result is not None and not isinstance(result, tuple):
65
  with lock:
66
  png_path = "image.png"
67
- result.save(png_path)
68
- image = str(Path(png_path).resolve())
69
  return image
70
  return None
71
 
72
- def gen_fn(model_index, prompt, nprompt="", height=None, width=None, steps=None, cfg=None, seed=-1):
73
  try:
74
  loop = asyncio.new_event_loop()
75
  result = loop.run_until_complete(infer(model_index, prompt, nprompt,
76
  height, width, steps, cfg, seed, inference_timeout))
77
  except (Exception, asyncio.CancelledError) as e:
78
  print(e)
79
- print(f"Task aborted: {models2[model_index]}")
80
  result = None
81
- raise gr.Error(f"Task aborted: {models2[model_index]}, Error: {e}")
82
  finally:
83
  loop.close()
84
  return result
@@ -98,13 +89,13 @@ display: inline-block !important; color: #ffffff !important;}
98
  border-bottom-color: #ffffff !important; border-left-color: #000000 !important;}
99
  """
100
 
101
- with gr.Blocks(theme='Yntec/YntecDarkTheme', fill_width=True, css=css) as myface:
102
  gr.HTML(f"""
103
  <div style="text-align: center; max-width: 1200px; margin: 0 auto;">
104
  <div class="center"><h1>Blitz Diffusion</h1></div>
105
  <p style="margin-bottom: 1px; color: #ffaa66;">
106
  <h3>{int(len(models))} Stable Diffusion models, but why? For your enjoyment!</h3></p>
107
- <br><div class="wrapper">9.3 <img src="https://huggingface.co/Yntec/DucHaitenLofi/resolve/main/NEW.webp" alt="NEW!" style="width:32px;height:16px;">This has become a legacy backup copy of old <u><a href="https://huggingface.co/spaces/Yntec/ToyWorld">ToyWorld</a></u>'s UI! Newer models added daily over there! 25+ new models since last update!</div>
108
  <p style="margin-bottom: 1px; font-size: 98%">
109
  <br><h4>If a model is already loaded each new image takes less than <b>10</b> seconds to generate!</h4></p>
110
  <p style="margin-bottom: 1px; color: #ffffff;">
@@ -126,9 +117,10 @@ with gr.Blocks(theme='Yntec/YntecDarkTheme', fill_width=True, css=css) as myface
126
  width = gr.Slider(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0, elem_classes=["gr-box", "gr-input"])
127
  height = gr.Slider(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0, elem_classes=["gr-box", "gr-input"])
128
  with gr.Row():
129
- steps = gr.Slider(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0, elem_classes=["gr-box", "gr-input"])
130
- cfg = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0, elem_classes=["gr-box", "gr-input"])
131
  seed = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1, elem_classes=["gr-box", "gr-input"])
 
132
  run = gr.Button("Generate Image", variant="primary", elem_classes="gr-button")
133
 
134
  with gr.Row():
@@ -153,9 +145,10 @@ with gr.Blocks(theme='Yntec/YntecDarkTheme', fill_width=True, css=css) as myface
153
  concurrency_limit=None,
154
  queue=False,
155
  )
156
- use_short.click(short_prompt, inputs=[input_text], outputs=magic1, queue=False)
157
- see_prompts.click(text_it1, inputs=[input_text], outputs=magic1, queue=False)
 
158
 
159
  myface.queue(default_concurrency_limit=200, max_size=200)
160
  myface.launch(show_api=False, max_threads=400)
161
- # https://github.com/gradio-app/gradio/issues/6339
 
1
  import gradio as gr
2
+ import os
 
 
3
  from all_models import models
4
+ from externalmod import gr_Interface_load, save_image, randomize_seed
5
  from prompt_extend import extend_prompt
 
6
  import asyncio
7
  from threading import RLock
8
  lock = RLock()
 
29
 
30
  # https://huggingface.co/docs/api-inference/detailed_parameters
31
  # https://huggingface.co/docs/huggingface_hub/package_reference/inference_client
32
+ async def infer(model_index, prompt, nprompt="", height=0, width=0, steps=0, cfg=0, seed=-1, timeout=inference_timeout):
 
33
  kwargs = {}
34
+ if height > 0: kwargs["height"] = height
35
+ if width > 0: kwargs["width"] = width
36
+ if steps > 0: kwargs["num_inference_steps"] = steps
37
+ if cfg > 0: cfg = kwargs["guidance_scale"] = cfg
38
+ if seed == -1: kwargs["seed"] = randomize_seed()
39
+ else: kwargs["seed"] = seed
 
 
 
 
40
  task = asyncio.create_task(asyncio.to_thread(models2[model_index].fn,
41
+ prompt=prompt, negative_prompt=nprompt, **kwargs, token=HF_TOKEN))
42
  await asyncio.sleep(0)
43
  try:
44
  result = await asyncio.wait_for(task, timeout=timeout)
45
  except asyncio.TimeoutError as e:
46
  print(e)
47
+ print(f"Task timed out: {models[model_index]}")
48
  if not task.done(): task.cancel()
49
  result = None
50
+ raise Exception(f"Task timed out: {models[model_index]}") from e
51
  except Exception as e:
52
  print(e)
53
  if not task.done(): task.cancel()
54
  result = None
55
+ raise Exception() from e
56
  if task.done() and result is not None and not isinstance(result, tuple):
57
  with lock:
58
  png_path = "image.png"
59
+ image = save_image(result, png_path, models[model_index], prompt, nprompt, height, width, steps, cfg, seed)
 
60
  return image
61
  return None
62
 
63
+ def gen_fn(model_index, prompt, nprompt="", height=0, width=0, steps=0, cfg=0, seed=-1):
64
  try:
65
  loop = asyncio.new_event_loop()
66
  result = loop.run_until_complete(infer(model_index, prompt, nprompt,
67
  height, width, steps, cfg, seed, inference_timeout))
68
  except (Exception, asyncio.CancelledError) as e:
69
  print(e)
70
+ print(f"Task aborted: {models[model_index]}")
71
  result = None
72
+ raise gr.Error(f"Task aborted: {models[model_index]}, Error: {e}")
73
  finally:
74
  loop.close()
75
  return result
 
89
  border-bottom-color: #ffffff !important; border-left-color: #000000 !important;}
90
  """
91
 
92
+ with gr.Blocks(theme='John6666/YntecDark', fill_width=True, css=css) as myface:
93
  gr.HTML(f"""
94
  <div style="text-align: center; max-width: 1200px; margin: 0 auto;">
95
  <div class="center"><h1>Blitz Diffusion</h1></div>
96
  <p style="margin-bottom: 1px; color: #ffaa66;">
97
  <h3>{int(len(models))} Stable Diffusion models, but why? For your enjoyment!</h3></p>
98
+ <br><div class="wrapper">9.19 <img src="https://huggingface.co/Yntec/DucHaitenLofi/resolve/main/NEW.webp" alt="NEW!" style="width:32px;height:16px;">This has become a legacy backup copy of old <u><a href="https://huggingface.co/spaces/Yntec/ToyWorld">ToyWorld</a></u>'s UI! Newer models added daily over there! 18 new models since last update!</div>
99
  <p style="margin-bottom: 1px; font-size: 98%">
100
  <br><h4>If a model is already loaded each new image takes less than <b>10</b> seconds to generate!</h4></p>
101
  <p style="margin-bottom: 1px; color: #ffffff;">
 
117
  width = gr.Slider(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0, elem_classes=["gr-box", "gr-input"])
118
  height = gr.Slider(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0, elem_classes=["gr-box", "gr-input"])
119
  with gr.Row():
120
+ steps = gr.Slider(label="Number of inference steps", info="If 0, the default value is used.", maximum=33, step=1, value=0, elem_classes=["gr-box", "gr-input"])
121
+ cfg = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=-1, elem_classes=["gr-box", "gr-input"])
122
  seed = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1, elem_classes=["gr-box", "gr-input"])
123
+ seed_rand = gr.Button("Randomize Seed 🎲", size="sm", variant="secondary")
124
  run = gr.Button("Generate Image", variant="primary", elem_classes="gr-button")
125
 
126
  with gr.Row():
 
145
  concurrency_limit=None,
146
  queue=False,
147
  )
148
+ use_short.click(short_prompt, inputs=[input_text], outputs=magic1)
149
+ see_prompts.click(text_it1, inputs=[input_text], outputs=magic1)
150
+ seed_rand.click(randomize_seed, None, [seed], queue=False)
151
 
152
  myface.queue(default_concurrency_limit=200, max_size=200)
153
  myface.launch(show_api=False, max_threads=400)
154
+ # https://github.com/gradio-app/gradio/issues/6339