John6666 committed
Commit d1ad803
Parent(s): 9b9b49c

Upload 4 files

Files changed (3):
  1. README.md +2 -2
  2. app.py +42 -42
  3. multit2i.py +31 -14
README.md CHANGED
@@ -1,10 +1,10 @@
 ---
-title: Free Multi Models Text-to-Image Demo V2
+title: Free Multi Models Text-to-Image Demo V3
 emoji: 🌐🌊
 colorFrom: blue
 colorTo: purple
 sdk: gradio
-sdk_version: 4.42.0
+sdk_version: 4.43.0
 app_file: app.py
 short_description: Text-to-Image
 license: mit
app.py CHANGED
@@ -5,57 +5,57 @@ from multit2i import (load_models, infer_fn, infer_rand_fn, save_gallery,
     get_positive_prefix, get_positive_suffix, get_negative_prefix, get_negative_suffix,
     get_recom_prompt_type, set_recom_prompt_preset, get_tag_type)
 
-max_images = 8
+max_images = 6
 MAX_SEED = 2**32-1
 load_models(models)
 
 css = """
 .model_info { text-align: center; }
-.output { width=112px; height=112px; !important; }
-.gallery { width=100%; min_height=768px; !important; }
+.output { width=112px; height=112px; max_width=112px; max_height=112px; !important; }
+.gallery { min_width=512px; min_height=512px; max_height=1024px; !important; }
 """
 
 with gr.Blocks(theme="NoCrypt/miku@>=1.2.2", fill_width=True, css=css) as demo:
-    with gr.Column():
-        with gr.Group():
-            model_name = gr.Dropdown(label="Select Model", choices=list(loaded_models.keys()), value=list(loaded_models.keys())[0], allow_custom_value=True)
-            model_info = gr.Markdown(value=get_model_info_md(list(loaded_models.keys())[0]), elem_classes="model_info")
-        with gr.Group():
-            clear_prompt = gr.Button(value="Clear Prompt 🗑️", size="sm", scale=1)
-            prompt = gr.Text(label="Prompt", lines=2, max_lines=8, placeholder="1girl, solo, ...", show_copy_button=True)
-            neg_prompt = gr.Text(label="Negative Prompt", lines=1, max_lines=8, placeholder="")
-            with gr.Accordion("Advanced options", open=False):
-                with gr.Row():
-                    width = gr.Slider(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
-                    height = gr.Slider(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
-                with gr.Row():
-                    steps = gr.Slider(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0)
-                    cfg = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0)
-                seed = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
-            with gr.Accordion("Recommended Prompt", open=False):
-                recom_prompt_preset = gr.Radio(label="Set Presets", choices=get_recom_prompt_type(), value="Common")
-                with gr.Row():
-                    positive_prefix = gr.CheckboxGroup(label="Use Positive Prefix", choices=get_positive_prefix(), value=[])
-                    positive_suffix = gr.CheckboxGroup(label="Use Positive Suffix", choices=get_positive_suffix(), value=["Common"])
-                    negative_prefix = gr.CheckboxGroup(label="Use Negative Prefix", choices=get_negative_prefix(), value=[])
-                    negative_suffix = gr.CheckboxGroup(label="Use Negative Suffix", choices=get_negative_suffix(), value=["Common"])
-            image_num = gr.Slider(label="Number of images", minimum=1, maximum=max_images, value=1, step=1, interactive=True, scale=1)
-        with gr.Row():
-            run_button = gr.Button("Generate Image", scale=6)
-            random_button = gr.Button("Random Model 🎲", scale=3)
-            stop_button = gr.Button('Stop', interactive=False, scale=1)
-        with gr.Column():
-            with gr.Group():
-                with gr.Row():
-                    output = [gr.Image(label='', elem_classes="output", type="filepath", format="png",
-                                       show_download_button=True, show_share_button=False, show_label=False,
-                                       interactive=False, min_width=80, visible=True) for _ in range(max_images)]
-            with gr.Group():
-                results = gr.Gallery(label="Gallery", elem_classes="gallery", interactive=False, show_download_button=True, show_share_button=False,
-                                     container=True, format="png", object_fit="cover", columns=2, rows=2)
-                image_files = gr.Files(label="Download", interactive=False)
-                clear_results = gr.Button("Clear Gallery / Download 🗑️")
+    with gr.Row():
+        with gr.Column(scale=10):
+            with gr.Group():
+                clear_prompt = gr.Button(value="Clear Prompt 🗑️", variant="secondary", size="sm", scale=1)
+                prompt = gr.Text(label="Prompt", lines=2, max_lines=8, placeholder="1girl, solo, ...", show_copy_button=True)
+                with gr.Accordion("Advanced options", open=False):
+                    neg_prompt = gr.Text(label="Negative Prompt", lines=1, max_lines=8, placeholder="")
+                    with gr.Row():
+                        width = gr.Slider(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
+                        height = gr.Slider(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
+                    with gr.Row():
+                        steps = gr.Slider(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0)
+                        cfg = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0)
+                    seed = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
+                    recom_prompt_preset = gr.Radio(label="Set Presets", choices=get_recom_prompt_type(), value="Common")
+                    with gr.Row():
+                        positive_prefix = gr.CheckboxGroup(label="Use Positive Prefix", choices=get_positive_prefix(), value=[])
+                        positive_suffix = gr.CheckboxGroup(label="Use Positive Suffix", choices=get_positive_suffix(), value=["Common"])
+                        negative_prefix = gr.CheckboxGroup(label="Use Negative Prefix", choices=get_negative_prefix(), value=[])
+                        negative_suffix = gr.CheckboxGroup(label="Use Negative Suffix", choices=get_negative_suffix(), value=["Common"])
+                image_num = gr.Slider(label="Number of images", minimum=1, maximum=max_images, value=1, step=1, interactive=True, scale=1)
+            with gr.Row():
+                run_button = gr.Button("Generate Image", scale=6)
+                random_button = gr.Button("Random Model 🎲", variant="secondary", scale=3)
+                stop_button = gr.Button('Stop', interactive=False, variant="secondary", scale=1)
+            with gr.Group():
+                model_name = gr.Dropdown(label="Select Model", choices=list(loaded_models.keys()), value=list(loaded_models.keys())[0], allow_custom_value=True)
+                model_info = gr.Markdown(value=get_model_info_md(list(loaded_models.keys())[0]), elem_classes="model_info")
+        with gr.Column(scale=10):
+            with gr.Group():
+                with gr.Row():
+                    output = [gr.Image(label='', elem_classes="output", type="filepath", format="png",
+                                       show_download_button=True, show_share_button=False, show_label=False, container=False,
+                                       interactive=False, min_width=80, visible=True) for _ in range(max_images)]
+            with gr.Group():
+                results = gr.Gallery(label="Gallery", elem_classes="gallery", interactive=False, show_download_button=True, show_share_button=False,
+                                     container=True, format="png", object_fit="cover", columns=2, rows=2)
+                image_files = gr.Files(label="Download", interactive=False)
+                clear_results = gr.Button("Clear Gallery / Download 🗑️", variant="secondary")
     with gr.Column():
         examples = gr.Examples(
             examples = [
                 ["souryuu asuka langley, 1girl, neon genesis evangelion, plugsuit, pilot suit, red bodysuit, sitting, crossing legs, black eye patch, cat hat, throne, symmetrical, looking down, from bottom, looking at viewer, outdoors"],
multit2i.py CHANGED
@@ -35,7 +35,7 @@ def is_repo_name(s):
 
 def get_status(model_name: str):
     from huggingface_hub import InferenceClient
-    client = InferenceClient(timeout=10)
+    client = InferenceClient(token=HF_TOKEN, timeout=10)
     return client.get_model_status(model_name)
 
 
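Threading a token into `InferenceClient` lets the status check also see models that are only visible to the authenticated account. A minimal standalone sketch of the same call, assuming `HF_TOKEN` is read from the environment (its actual definition lives outside this hunk):

    import os
    from huggingface_hub import InferenceClient

    HF_TOKEN = os.environ.get("HF_TOKEN")  # assumption: may be None for anonymous access

    def get_status(model_name: str):
        # timeout=10 bounds the HTTP request; token=None falls back to anonymous access
        client = InferenceClient(token=HF_TOKEN, timeout=10)
        return client.get_model_status(model_name)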
@@ -54,7 +54,7 @@ def is_loadable(model_name: str, force_gpu: bool = False):
 
 def find_model_list(author: str="", tags: list[str]=[], not_tag="", sort: str="last_modified", limit: int=30, force_gpu=False, check_status=False):
     from huggingface_hub import HfApi
-    api = HfApi()
+    api = HfApi(token=HF_TOKEN)
     default_tags = ["diffusers"]
     if not sort: sort = "last_modified"
     limit = limit * 20 if check_status and force_gpu else limit * 5
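The body that actually queries the Hub is elided from this hunk, but given the `author`, `tags`, `sort`, and `limit` parameters it presumably wraps `HfApi.list_models`. A hedged sketch of that kind of authenticated query (the author and filter values here are placeholders, not taken from the diff):

    import os
    from huggingface_hub import HfApi

    HF_TOKEN = os.environ.get("HF_TOKEN")
    api = HfApi(token=HF_TOKEN)

    # list_models supports author/filter/sort/limit; "diffusers" mirrors default_tags
    for m in api.list_models(author="some-author", filter="diffusers",
                             sort="last_modified", limit=30):
        print(m.id, m.tags[:3])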
@@ -67,7 +67,7 @@ def find_model_list(author: str="", tags: list[str]=[], not_tag="", sort: str="l
         print(e)
         return models
     for model in model_infos:
-        if not model.private and not model.gated:
+        if not model.private and not model.gated or HF_TOKEN is not None:
            loadable = is_loadable(model.id, force_gpu) if check_status else True
            if not_tag and not_tag in model.tags or not loadable: continue
            models.append(model.id)
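A precedence note on the new condition: `and` binds tighter than `or`, so it reads as `(not model.private and not model.gated) or (HF_TOKEN is not None)`, i.e. once a token is set, every listed model passes this gate, private and gated ones included. A quick self-contained check:

    private, gated, token = True, True, "hf_xxx"  # hypothetical values
    keep = not private and not gated or token is not None
    assert keep == ((not private and not gated) or (token is not None))
    print(keep)  # True: any model is kept once a token is present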
@@ -77,7 +77,7 @@ def find_model_list(author: str="", tags: list[str]=[], not_tag="", sort: str="l
 
 def get_t2i_model_info_dict(repo_id: str):
     from huggingface_hub import HfApi
-    api = HfApi()
+    api = HfApi(token=HF_TOKEN)
     info = {"md": "None"}
     try:
         if not is_repo_name(repo_id) or not api.repo_exists(repo_id=repo_id): return info
@@ -86,14 +86,15 @@ def get_t2i_model_info_dict(repo_id: str):
         print(f"Error: Failed to get {repo_id}'s info.")
         print(e)
         return info
-    if model.private or model.gated: return info
+    if model.private or model.gated and HF_TOKEN is None: return info
     try:
         tags = model.tags
     except Exception as e:
         print(e)
         return info
     if not 'diffusers' in model.tags: return info
-    if 'diffusers:StableDiffusionXLPipeline' in tags: info["ver"] = "SDXL"
+    if 'diffusers:FluxPipeline' in tags: info["ver"] = "FLUX.1"
+    elif 'diffusers:StableDiffusionXLPipeline' in tags: info["ver"] = "SDXL"
     elif 'diffusers:StableDiffusionPipeline' in tags: info["ver"] = "SD1.5"
     elif 'diffusers:StableDiffusion3Pipeline' in tags: info["ver"] = "SD3"
     else: info["ver"] = "Other"
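The mirror-image gate here parses as `model.private or (model.gated and HF_TOKEN is None)`, so a private repo is still rejected even when a token is available; the token only rescues gated repos. If private repos were meant to pass too, the condition would need parentheses, e.g. `(model.private or model.gated) and HF_TOKEN is None` (presumed intent, not confirmed by the diff):

    private, gated, token = True, False, "hf_xxx"  # hypothetical values
    print(private or gated and token is None)    # True  -> still skipped
    print((private or gated) and token is None)  # False -> kept with a token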
@@ -109,7 +110,8 @@ def get_t2i_model_info_dict(repo_id: str):
 
 
 def rename_image(image_path: str | None, model_name: str, save_path: str | None = None):
-    from PIL import Image
+    from PIL import Image, ImageFile
+    ImageFile.LOAD_TRUNCATED_IMAGES = True
     from datetime import datetime, timezone, timedelta
     if image_path is None: return None
     dt_now = datetime.now(timezone(timedelta(hours=9)))
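`ImageFile.LOAD_TRUNCATED_IMAGES = True` is a module-level Pillow switch: with it set, decoding a file whose data stream ends early pads the missing tail instead of raising `OSError: image file is truncated`, a plausible failure mode for an image that was just fetched over the network. A minimal illustration:

    from PIL import Image, ImageFile

    ImageFile.LOAD_TRUNCATED_IMAGES = True  # tolerate partially transferred files

    im = Image.open("image.png")  # hypothetical, possibly truncated download
    im.load()                     # decodes without raising; missing data is padded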
@@ -352,7 +354,7 @@ def warm_model(model_name: str):
 
 # https://huggingface.co/docs/api-inference/detailed_parameters
 # https://huggingface.co/docs/huggingface_hub/package_reference/inference_client
-def infer_body(client: InferenceClient | gr.Interface, prompt: str, neg_prompt: str | None = None,
+def infer_body(client: InferenceClient | gr.Interface | object, prompt: str, neg_prompt: str | None = None,
                height: int | None = None, width: int | None = None,
                steps: int | None = None, cfg: int | None = None, seed: int = -1):
     png_path = "image.png"
@@ -372,7 +374,7 @@ def infer_body(client: InferenceClient | gr.Interface, prompt: str, neg_prompt:
         return str(Path(png_path).resolve())
     except Exception as e:
         print(e)
-        return None
+        raise Exception(e)
 
 
 async def infer(model_name: str, prompt: str, neg_prompt: str | None = None,
@@ -392,11 +394,17 @@ async def infer(model_name: str, prompt: str, neg_prompt: str | None = None,
     await asyncio.sleep(0)
     try:
         result = await asyncio.wait_for(task, timeout=timeout)
-    except (Exception, asyncio.TimeoutError) as e:
+    except asyncio.TimeoutError as e:
         print(e)
         print(f"Task timed out: {model_name}")
         if not task.done(): task.cancel()
         result = None
+        raise Exception(f"Task timed out: {model_name}")
+    except Exception as e:
+        print(e)
+        if not task.done(): task.cancel()
+        result = None
+        raise Exception(e)
     if task.done() and result is not None:
         with lock:
             image = rename_image(result, model_name, save_path)
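Taken together with the `raise Exception(e)` in `infer_body` above, failures no longer surface as silent `None` results: the worker raises, `asyncio.wait_for` re-raises in `infer`, and `infer` re-raises to its synchronous callers. The timeout half of that pattern, as a standalone sketch:

    import asyncio

    async def slow_job():
        await asyncio.sleep(5)  # stand-in for the real inference call
        return "image.png"

    async def run_with_timeout(timeout: float = 1.0):
        task = asyncio.create_task(slow_job())
        try:
            return await asyncio.wait_for(task, timeout=timeout)
        except asyncio.TimeoutError:
            if not task.done(): task.cancel()  # wait_for already cancels; defensive
            raise Exception("Task timed out")

    try:
        print(asyncio.run(run_with_timeout()))
    except Exception as e:
        print(e)  # -> Task timed out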
@@ -404,20 +412,25 @@ async def infer(model_name: str, prompt: str, neg_prompt: str | None = None,
     return None
 
 
+# https://github.com/aio-libs/pytest-aiohttp/issues/8 # also AsyncInferenceClient is buggy.
 def infer_fn(model_name: str, prompt: str, neg_prompt: str | None = None, height: int | None = None,
              width: int | None = None, steps: int | None = None, cfg: int | None = None, seed: int = -1,
              pos_pre: list = [], pos_suf: list = [], neg_pre: list = [], neg_suf: list = [], save_path: str | None = None):
     if model_name == 'NA':
         return None
     try:
-        prompt, neg_prompt = recom_prompt(prompt, neg_prompt, pos_pre, pos_suf, neg_pre, neg_suf)
+        loop = asyncio.get_running_loop()
+    except Exception:
         loop = asyncio.new_event_loop()
+    try:
+        prompt, neg_prompt = recom_prompt(prompt, neg_prompt, pos_pre, pos_suf, neg_pre, neg_suf)
         result = loop.run_until_complete(infer(model_name, prompt, neg_prompt, height, width,
                                                steps, cfg, seed, save_path, inference_timeout))
     except (Exception, asyncio.CancelledError) as e:
         print(e)
-        print(f"Task aborted: {model_name}")
+        print(f"Task aborted: {model_name}, Error: {e}")
         result = None
+        raise gr.Error(f"Task aborted: {model_name}, Error: {e}")
     finally:
         loop.close()
     return result
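Two caveats on the loop handling above, for readers reusing the pattern: `run_until_complete` raises `RuntimeError` if the loop returned by `get_running_loop()` is actually running, and the `finally: loop.close()` closes whichever loop was obtained, so a captured running loop would be unusable afterwards. When the handler is called from a plain non-async context (the usual case for a Gradio event handler), `asyncio.run` is a simpler equivalent that creates and disposes a fresh loop per call; a sketch, not the author's code:

    import asyncio

    async def infer_async() -> str:
        await asyncio.sleep(0.1)  # stand-in for the real async inference
        return "image.png"

    def infer_sync() -> str | None:
        try:
            return asyncio.run(infer_async())  # new loop, closed automatically
        except Exception as e:
            print(f"Task aborted: {e}")
            return None

    print(infer_sync())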
@@ -432,14 +445,18 @@ def infer_rand_fn(model_name_dummy: str, prompt: str, neg_prompt: str | None = N
     random.seed()
     model_name = random.choice(list(loaded_models.keys()))
     try:
-        prompt, neg_prompt = recom_prompt(prompt, neg_prompt, pos_pre, pos_suf, neg_pre, neg_suf)
+        loop = asyncio.get_running_loop()
+    except Exception:
         loop = asyncio.new_event_loop()
+    try:
+        prompt, neg_prompt = recom_prompt(prompt, neg_prompt, pos_pre, pos_suf, neg_pre, neg_suf)
         result = loop.run_until_complete(infer(model_name, prompt, neg_prompt, height, width,
                                                steps, cfg, seed, save_path, inference_timeout))
     except (Exception, asyncio.CancelledError) as e:
         print(e)
-        print(f"Task aborted: {model_name}")
+        print(f"Task aborted: {model_name}, Error: {e}")
         result = None
+        raise gr.Error(f"Task aborted: {model_name}, Error: {e}")
     finally:
         loop.close()
     return result
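The switch from a bare `print` to `raise gr.Error(...)` matters for the UI: an exception of type `gr.Error` raised inside a Gradio event handler is caught by Gradio and shown to the user as a visible error message instead of the generation failing silently. A minimal sketch:

    import gradio as gr

    def handler(prompt: str):
        if not prompt:
            raise gr.Error("Prompt is empty.")  # surfaced in the frontend
        return prompt

    with gr.Blocks() as demo:
        inp = gr.Text(label="Prompt")
        out = gr.Text(label="Echo")
        inp.submit(handler, inp, out)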
 