John6666 committed
Commit 8300fda • 1 Parent(s): 3c6fee5

Upload 3 files
Files changed (3):
  1. all_models.py +14 -1
  2. app.py +128 -41
  3. externalmod.py +57 -4
all_models.py CHANGED
@@ -901,4 +901,17 @@ models = [
     "CompVis/stable-diffusion-v1-3", #207
     "CompVis/stable-diffusion-v1-2", #208
     "CompVis/stable-diffusion-v1-1", #209
-]
+]
+
+
+from externalmod import find_model_list
+
+#models = find_model_list("Yntec", [], "", "last_modified", 20)
+
+# Examples:
+#models = ['yodayo-ai/kivotos-xl-2.0', 'yodayo-ai/holodayo-xl-2.1'] # specific models
+#models = find_model_list("Yntec", [], "", "last_modified", 20) # Yntec's latest 20 models
+#models = find_model_list("Yntec", ["anime"], "", "last_modified", 20) # Yntec's latest 20 models with the 'anime' tag
+#models = find_model_list("Yntec", [], "anime", "last_modified", 20) # Yntec's latest 20 models without the 'anime' tag
+#models = find_model_list("", [], "", "last_modified", 20) # the latest 20 text-to-image models on Hugging Face
+#models = find_model_list("", [], "", "downloads", 20) # the 20 most downloaded (last 30 days) text-to-image models on Hugging Face
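Note on usage: find_model_list (added to externalmod.py in this commit) returns a plain list of repo IDs, so it can replace the hard-coded list above wholesale. A minimal sketch, assuming you want to keep the static list as a fallback when the Hub query returns nothing:

from externalmod import find_model_list

fetched = find_model_list("Yntec", [], "", "last_modified", 20)  # returns [] on failure
if fetched:
    models = fetched  # override the static list defined above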
app.py CHANGED
@@ -1,40 +1,42 @@
 import gradio as gr
 from random import randint
 from all_models import models
-
 from externalmod import gr_Interface_load
+import asyncio
+import os
+from threading import RLock
+lock = RLock()
+HF_TOKEN = os.environ.get("HF_TOKEN") if os.environ.get("HF_TOKEN") else None # If private or gated models aren't used, setting this ENV variable is unnecessary.
 
 def load_fn(models):
     global models_load
     models_load = {}
-
     for model in models:
         if model not in models_load.keys():
             try:
-                m = gr_Interface_load(f'models/{model}')
+                m = gr_Interface_load(f'models/{model}', hf_token=HF_TOKEN)
             except Exception as error:
-                m = gr.Interface(lambda txt: None, ['text'], ['image'])
+                print(error)
+                m = gr.Interface(lambda: None, ['text'], ['image'])
         models_load.update({model: m})
 
-
 load_fn(models)
 
-
 num_models = 1
+max_imagesone = 1
+max_images = 6
 default_models = models[:num_models]
-
-
+inference_timeout = 300
+MAX_SEED = 2**32-1
 
 def extend_choices(choices):
     return choices + (num_models - len(choices)) * ['NA']
 
-
 def update_imgbox(choices):
     choices_plus = extend_choices(choices)
-    return [gr.Image(None, label = m, visible = (m != 'NA')) for m in choices_plus]
+    return [gr.Image(None, label=m, visible=(m != 'NA')) for m in choices_plus]
 
-
-def gen_fn(model_str, prompt):
+def gen_fn_original(model_str, prompt):
     if model_str == 'NA':
         return None
     noise = str('') #str(randint(0, 99999999999))
@@ -45,7 +47,61 @@ def gen_fnsix(model_str, prompt):
         return None
     noisesix = str(randint(1941, 2023)) #str(randint(0, 99999999999))
     return models_load[model_str](f'{prompt} {noisesix}')
-with gr.Blocks() as demo:
+
+# https://huggingface.co/docs/api-inference/detailed_parameters
+# https://huggingface.co/docs/huggingface_hub/package_reference/inference_client
+async def infer(model_str, prompt, nprompt="", height=None, width=None, steps=None, cfg=None, seed=-1, timeout=inference_timeout):
+    from pathlib import Path
+    kwargs = {}
+    if height is not None and height >= 256: kwargs["height"] = height
+    if width is not None and width >= 256: kwargs["width"] = width
+    if steps is not None and steps >= 1: kwargs["num_inference_steps"] = steps
+    if cfg is not None and cfg > 0: kwargs["guidance_scale"] = cfg
+    noise = ""
+    if seed >= 0: kwargs["seed"] = seed
+    else:
+        rand = randint(1, 500)
+        for i in range(rand):
+            noise += " "
+    task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn,
+                               prompt=f'{prompt} {noise}', negative_prompt=nprompt, **kwargs, token=HF_TOKEN))
+    await asyncio.sleep(0)
+    try:
+        result = await asyncio.wait_for(task, timeout=timeout)
+    except (Exception, asyncio.TimeoutError) as e:
+        print(e)
+        print(f"Task timed out: {model_str}")
+        if not task.done(): task.cancel()
+        result = None
+    if task.done() and result is not None:
+        with lock:
+            png_path = "image.png"
+            result.save(png_path)
+            image = str(Path(png_path).resolve())
+        return image
+    return None
+
+def gen_fn(model_str, prompt, nprompt="", height=None, width=None, steps=None, cfg=None, seed=-1):
+    if model_str == 'NA':
+        return None
+    try:
+        loop = asyncio.new_event_loop()
+        result = loop.run_until_complete(infer(model_str, prompt, nprompt,
+                                               height, width, steps, cfg, seed, inference_timeout))
+    except (Exception, asyncio.CancelledError) as e:
+        print(e)
+        print(f"Task aborted: {model_str}")
+        result = None
+    finally:
+        loop.close()
+    return result
+
+css = """
+.gradio-container {max-width: 1200px; margin: 0 auto !important;}
+.output {width: 128px; height: 128px !important;}
+.outputone {width: 512px; height: 512px !important;}
+"""
+with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=css) as demo:
     gr.HTML(
         """
         <div>
@@ -54,24 +110,38 @@ with gr.Blocks() as demo:
         """
     )
     with gr.Tab('One Image'):
-        model_choice = gr.Dropdown(models, label = 'Choose a model from the 872 available! Try clearing the box and typing on it to filter them!', value = models[0], filterable = True)
-        txt_input = gr.Textbox(label = 'Your prompt:')
+        model_choice = gr.Dropdown(models, label=f'Choose a model from the {int(len(models))} available! Try clearing the box and typing in it to filter them!', value=models[0], filterable=True)
+        with gr.Group():
+            txt_input = gr.Textbox(label='Your prompt:', lines=1)
+            with gr.Accordion("Advanced", open=False, visible=True):
+                neg_input = gr.Textbox(label='Negative prompt:', lines=1)
+                with gr.Row():
+                    width = gr.Slider(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
+                    height = gr.Slider(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
+                with gr.Row():
+                    steps = gr.Slider(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0)
+                    cfg = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0)
+                    seed = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
+        num_imagesone = gr.Slider(1, max_imagesone, value=max_imagesone, step=1, label='Nobody gets to see this label so I can put here whatever I want!', visible=False)
 
-        max_imagesone = 1
-        num_imagesone = gr.Slider(1, max_imagesone, value = max_imagesone, step = 1, label = 'Nobody gets to see this label so I can put here whatever I want!', visible = False)
-
-        gen_button = gr.Button('Generate')
-        stop_button = gr.Button('Stop', variant = 'secondary', interactive = False)
-        gen_button.click(lambda s: gr.update(interactive = True), None, stop_button)
+        with gr.Row():
+            gen_button = gr.Button('Generate', scale=3)
+            stop_button = gr.Button('Stop', variant='secondary', interactive=False, scale=1)
+        gen_button.click(lambda: gr.update(interactive=True), None, stop_button)
 
         with gr.Row():
-            output = [gr.Image(label = '') for _ in range(max_imagesone)]
+            output = [gr.Image(label='', show_download_button=True, elem_classes="outputone",
+                               interactive=False, min_width=80, show_share_button=False, format="png",
+                               visible=True) for _ in range(max_imagesone)]
 
         for i, o in enumerate(output):
            img_in = gr.Number(i, visible = False)
            num_imagesone.change(lambda i, n: gr.update(visible = (i < n)), [img_in, num_imagesone], o, show_progress = False)
-            gen_event = gen_button.click(lambda i, n, m, t: gen_fn(m, t) if (i < n) else None, [img_in, num_imagesone, model_choice, txt_input], o)
-            stop_button.click(lambda s: gr.update(interactive = False), None, stop_button, cancels = [gen_event])
+            gen_event = gr.on(triggers=[gen_button.click, txt_input.submit],
+                              fn=lambda i, n, m, t1, t2, n1, n2, n3, n4, n5: gen_fn(m, t1, t2, n1, n2, n3, n4, n5) if (i < n) else None,
+                              inputs=[img_in, num_imagesone, model_choice, txt_input, neg_input,
+                                      height, width, steps, cfg, seed], outputs=[o])
+            stop_button.click(lambda: gr.update(interactive=False), None, stop_button, cancels=[gen_event])
     with gr.Row():
        gr.HTML(
            """
@@ -81,35 +151,52 @@ with gr.Blocks() as demo:
        """
    )
    with gr.Tab('Up To Six'):
-        model_choice2 = gr.Dropdown(models, label = 'Choose a model from the 872 available! Try clearing the box and typing on it to filter them!', value = models[0], filterable = True)
-        txt_input2 = gr.Textbox(label = 'Your prompt:')
+        model_choice2 = gr.Dropdown(models, label=f'Choose a model from the {int(len(models))} available! Try clearing the box and typing in it to filter them!',
+                                    value=models[0], filterable=True)
+        with gr.Group():
+            txt_input2 = gr.Textbox(label='Your prompt:', lines=1)
+            with gr.Accordion("Advanced", open=False, visible=True):
+                neg_input2 = gr.Textbox(label='Negative prompt:', lines=1)
+                with gr.Row():
+                    width2 = gr.Slider(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
+                    height2 = gr.Slider(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
+                with gr.Row():
+                    steps2 = gr.Slider(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0)
+                    cfg2 = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0)
+                    seed2 = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
+
+        num_images = gr.Slider(1, max_images, value=max_images, step=1,
+                               label=f'Number of images (if you want fewer than {int(max_images)}, decrease them slowly until they match the boxes below)')
 
-        max_images = 6
-        num_images = gr.Slider(1, max_images, value = max_images, step = 1, label = 'Number of images (if you want less than 6 decrease them slowly until they match the boxes below)')
-
-        gen_button2 = gr.Button('Generate up to 6 images in up to 3 minutes total')
-        stop_button2 = gr.Button('Stop', variant = 'secondary', interactive = False)
-        gen_button2.click(lambda s: gr.update(interactive = True), None, stop_button2)
+        with gr.Row():
+            gen_button2 = gr.Button(f'Generate up to {int(max_images)} images in up to 3 minutes total', scale=3)
+            stop_button2 = gr.Button('Stop', variant='secondary', interactive=False, scale=1)
+        gen_button2.click(lambda: gr.update(interactive=True), None, stop_button2)
        gr.HTML(
            """
            <div style="text-align: center; max-width: 1200px; margin: 0 auto;">
              <div>
              <body>
-              <div class="center"><p style="margin-bottom: 10px; color: #000000;">Scroll down to see more images (they generate in a random order).</p>
+              <div class="center"><p style="margin-bottom: 10px;">Scroll down to see more images (they generate in a random order).</p>
              </div>
              </body>
             </div>
           </div>
           """
        )
-        with gr.Column():
-            output2 = [gr.Image(label = '') for _ in range(max_images)]
+        with gr.Row():
+            output2 = [gr.Image(label='', show_download_button=True, elem_classes="output",
+                                interactive=False, min_width=80, visible=True, format="png",
+                                show_share_button=False, show_label=False) for _ in range(max_images)]
 
        for i, o in enumerate(output2):
-            img_i = gr.Number(i, visible = False)
-            num_images.change(lambda i, n: gr.update(visible = (i < n)), [img_i, num_images], o, show_progress = False)
-            gen_event2 = gen_button2.click(lambda i, n, m, t: gen_fnsix(m, t) if (i < n) else None, [img_i, num_images, model_choice2, txt_input2], o)
-            stop_button2.click(lambda s: gr.update(interactive = False), None, stop_button2, cancels = [gen_event2])
+            img_i = gr.Number(i, visible=False)
+            num_images.change(lambda i, n: gr.update(visible=(i < n)), [img_i, num_images], o, show_progress=False)
+            gen_event2 = gr.on(triggers=[gen_button2.click, txt_input2.submit],
+                               fn=lambda i, n, m, t1, t2, n1, n2, n3, n4, n5: gen_fn(m, t1, t2, n1, n2, n3, n4, n5) if (i < n) else None,
+                               inputs=[img_i, num_images, model_choice2, txt_input2, neg_input2,
+                                       height2, width2, steps2, cfg2, seed2], outputs=[o])
+            stop_button2.click(lambda: gr.update(interactive=False), None, stop_button2, cancels=[gen_event2])
    with gr.Row():
        gr.HTML(
            """
@@ -119,5 +206,5 @@ with gr.Blocks() as demo:
        """
    )
 
-    demo.queue()
-    demo.launch()
+    demo.queue(default_concurrency_limit=200, max_size=200)
+    demo.launch(show_api=False, max_threads=400)
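The new infer()/gen_fn() pair offloads each blocking Inference API call to a worker thread and bounds it with inference_timeout. A stripped-down sketch of that pattern, with a hypothetical slow_call standing in for models_load[model_str].fn:

import asyncio
import time

def slow_call(prompt):  # hypothetical stand-in for the blocking inference call
    time.sleep(2)
    return f"result for {prompt}"

async def run_with_timeout(prompt, timeout=5.0):
    task = asyncio.create_task(asyncio.to_thread(slow_call, prompt))
    try:
        return await asyncio.wait_for(task, timeout=timeout)
    except asyncio.TimeoutError:
        if not task.done(): task.cancel()  # cancels the awaiting task; the worker thread still runs to completion
        return None

print(asyncio.run(run_with_timeout("a cat")))  # -> 'result for a cat'

gen_fn() drives this coroutine from Gradio's synchronous handler by creating a private event loop per call (asyncio.new_event_loop() plus run_until_complete) and closing it in a finally block.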
externalmod.py CHANGED
@@ -33,6 +33,9 @@ if TYPE_CHECKING:
     from gradio.interface import Interface
 
 
+server_timeout = 600
+
+
 @document()
 def load(
     name: str,
@@ -115,7 +118,7 @@ def from_model(model_name: str, hf_token: str | None, alias: str | None, **kwargs):
 
     headers["X-Wait-For-Model"] = "true"
     client = huggingface_hub.InferenceClient(
-        model=model_name, headers=headers, token=hf_token
+        model=model_name, headers=headers, token=hf_token, timeout=server_timeout,
     )
 
     # For tasks that are not yet supported by the InferenceClient
@@ -365,10 +368,10 @@ def from_model(model_name: str, hf_token: str | None, alias: str | None, **kwargs):
     else:
         raise ValueError(f"Unsupported pipeline type: {p}")
 
-    def query_huggingface_inference_endpoints(*data):
+    def query_huggingface_inference_endpoints(*data, **kwargs):
         if preprocess is not None:
             data = preprocess(*data)
-        data = fn(*data)  # type: ignore
+        data = fn(*data, **kwargs)  # type: ignore
         if postprocess is not None:
             data = postprocess(data)  # type: ignore
         return data
@@ -528,4 +531,54 @@ def gr_Interface_load(
     alias: str | None = None,
     **kwargs,
 ) -> Blocks:
-    return load_blocks_from_repo(name, src, hf_token, alias)
+    try:
+        return load_blocks_from_repo(name, src, hf_token, alias)
+    except Exception as e:
+        print(e)
+        return gradio.Interface(lambda: None, ['text'], ['image'])
+
+
+def list_uniq(l):
+    return sorted(set(l), key=l.index)
+
+
+def get_status(model_name: str):
+    from huggingface_hub import InferenceClient
+    client = InferenceClient(timeout=10)
+    return client.get_model_status(model_name)
+
+
+def is_loadable(model_name: str, force_gpu: bool = False):
+    try:
+        status = get_status(model_name)
+    except Exception as e:
+        print(e)
+        print(f"Couldn't load {model_name}.")
+        return False
+    gpu_state = isinstance(status.compute_type, dict) and "gpu" in status.compute_type.keys()
+    if status is None or status.state not in ["Loadable", "Loaded"] or (force_gpu and not gpu_state):
+        print(f"Couldn't load {model_name}. Model state: '{status.state}', GPU: {gpu_state}")
+    return status is not None and status.state in ["Loadable", "Loaded"] and (not force_gpu or gpu_state)
+
+
+def find_model_list(author: str = "", tags: list[str] = [], not_tag="", sort: str = "last_modified", limit: int = 30, force_gpu=False, check_status=False):
+    from huggingface_hub import HfApi
+    api = HfApi()
+    default_tags = ["diffusers"]
+    if not sort: sort = "last_modified"
+    fetch_limit = limit * 20 if check_status and force_gpu else limit * 5  # over-fetch so enough candidates remain after filtering
+    models = []
+    try:
+        model_infos = api.list_models(author=author, task="text-to-image",
+                                      tags=list_uniq(default_tags + tags), cardData=True, sort=sort, limit=fetch_limit)
+    except Exception as e:
+        print("Error: Failed to list models.")
+        print(e)
+        return models
+    for model in model_infos:
+        if not model.private and not model.gated:
+            loadable = is_loadable(model.id, force_gpu) if check_status else True
+            if not_tag and not_tag in model.tags or not loadable: continue
+            models.append(model.id)
+            if len(models) == limit: break
+    return models
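For context, find_model_list composes the helpers above: it over-fetches with HfApi.list_models, drops private and gated repos, optionally filters by serverless Inference API status via is_loadable, and truncates to the requested count. A hedged usage sketch (the author and tag are examples only):

from externalmod import find_model_list, is_loadable

ids = find_model_list(author="Yntec", tags=["anime"], sort="downloads", limit=5)
for repo_id in ids:
    print(repo_id, is_loadable(repo_id))  # is_loadable queries the serverless Inference API model status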