John6666 committed on
Commit f3a071e
1 Parent(s): 0efcf6b

Upload 4 files

Files changed (4):
  1. app.py +85 -82
  2. env.py +55 -1
  3. mod.py +28 -48
  4. requirements.txt +2 -1
app.py CHANGED
@@ -8,10 +8,12 @@ from diffusers import FluxControlNetPipeline, FluxControlNetModel, FluxMultiCont
 from huggingface_hub import HfFileSystem, ModelCard
 import random
 import time
+import os
 
-from mod import (models, clear_cache, get_repo_safetensors, is_repo_name, is_repo_exists,
-                 description_ui, num_loras, compose_lora_json, is_valid_lora, fuse_loras,
-                 get_trigger_word, enhance_prompt, deselect_lora, num_cns, set_control_union_image,
+from env import models, num_loras, num_cns
+from mod import (clear_cache, get_repo_safetensors, is_repo_name, is_repo_exists, get_model_trigger,
+                 description_ui, compose_lora_json, is_valid_lora, fuse_loras, save_image,
+                 get_trigger_word, enhance_prompt, deselect_lora, set_control_union_image,
                  get_control_union_mode, set_control_union_mode, get_control_params, translate_to_en)
 from flux import (search_civitai_lora, select_civitai_lora, search_civitai_lora_json,
                   download_my_lora, get_all_lora_tupled_list, apply_lora_prompt,
@@ -26,7 +28,7 @@ with open('loras.json', 'r') as f:
 # Initialize the base model
 base_model = models[0]
 controlnet_model_union_repo = 'InstantX/FLUX.1-dev-Controlnet-Union'
-#controlnet_model_union_repo = 'InstantX/FLUX.1-dev-Controlnet-Union-alpha'
+#controlnet_model_union_repo = 'Shakker-Labs/FLUX.1-dev-ControlNet-Union-Pro'
 dtype = torch.bfloat16
 #dtype = torch.float8_e4m3fn
 #device = "cuda" if torch.cuda.is_available() else "cpu"
@@ -128,6 +130,7 @@ def update_selection(evt: gr.SelectData, width, height):
 
 @spaces.GPU(duration=70)
 def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, cn_on, progress=gr.Progress(track_tqdm=True)):
+    from diffusers.utils import load_image
     global pipe
     global taef1
     global good_vae
@@ -142,8 +145,8 @@ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, cn_on, progress=gr.Progress(track_tqdm=True)):
     # Generate image
     modes, images, scales = get_control_params()
     if not cn_on or len(modes) == 0:
-        pipe.vae = taef1
         pipe.to("cuda")
+        pipe.vae = taef1
         pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
         progress(0, desc="Start Inference.")
         for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
@@ -159,11 +162,11 @@ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, cn_on, progress=gr.Progress(track_tqdm=True)):
         ):
             yield img
     else:
+        pipe.to("cuda")
         if controlnet is not None: controlnet.to("cuda")
         if controlnet_union is not None: controlnet_union.to("cuda")
         pipe.vae = good_vae
         pipe.controlnet = controlnet
-        pipe.to("cuda")
         progress(0, desc="Start Inference with ControlNet.")
         for img in pipe(
             prompt=prompt_mash,
@@ -201,7 +204,7 @@ def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, wid
 
     if translate_on: prompt = translate_to_en(prompt)
 
-    prompt_mash = prompt
+    prompt_mash = prompt + get_model_trigger(last_model)
     if is_valid_lora(lora_json):
         # Load External LoRA weights
         with calculateDuration("Loading External LoRA weights"):
@@ -215,13 +218,13 @@ def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, wid
         if(trigger_word):
             if "trigger_position" in selected_lora:
                 if selected_lora["trigger_position"] == "prepend":
-                    prompt_mash = f"{trigger_word} {prompt}"
+                    prompt_mash = f"{trigger_word} {prompt_mash}"
                 else:
-                    prompt_mash = f"{prompt} {trigger_word}"
+                    prompt_mash = f"{prompt_mash} {trigger_word}"
             else:
-                prompt_mash = f"{trigger_word} {prompt}"
+                prompt_mash = f"{trigger_word} {prompt_mash}"
         else:
-            prompt_mash = prompt
+            prompt_mash = prompt_mash
         # Load LoRA weights
         with calculateDuration(f"Loading LoRA weights for {selected_lora['title']}"):
             if "weights" in selected_lora:
@@ -245,7 +248,7 @@ def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, wid
         progress_bar = f'<div class="progress-container"><div class="progress-bar" style="--current: {step_counter}; --total: {steps};"></div></div>'
         yield image, seed, gr.update(value=progress_bar, visible=True)
 
-    yield final_image, seed, gr.update(value=progress_bar, visible=False)
+    yield save_image(final_image, None, last_model, prompt_mash, height, width, steps, cfg_scale, seed), seed, gr.update(value=progress_bar, visible=False)
 
 def get_huggingface_safetensors(link):
     split_link = link.split("/")
@@ -338,7 +341,7 @@ css = '''
 #progress .generating{display:none}
 .progress-container {width: 100%;height: 30px;background-color: #f0f0f0;border-radius: 15px;overflow: hidden;margin-bottom: 20px}
 .progress-bar {height: 100%;background-color: #4f46e5;width: calc(var(--current) / var(--total) * 100%);transition: width 0.5s ease-in-out}
-.info {text-align:center; display:inline-flex; align-items:center !important}
+.info {text-align:center; !important}
 '''
 with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=css, delete_cache=(60, 3600)) as app:
     with gr.Tab("FLUX LoRA the Explorer"):
@@ -460,7 +463,7 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=css, delete_cache=(60, 3600)) as app:
             with gr.Row():
                 cn_image_ref[i] = gr.Image(label="Image Reference", type="pil", format="png", height=256, sources=["upload", "clipboard"], show_share_button=False)
                 cn_image[i] = gr.Image(label="Control Image", type="pil", format="png", height=256, show_share_button=False, interactive=False)
-
+
     gallery.select(
         update_selection,
         inputs=[width, height],
@@ -643,84 +646,84 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=css, delete_cache=(60, 3600)) as app:
         pg_generate_text_button = gr.Button("Generate Prompt with LLM (Llama 3.1 70B)")
         pg_text_output = gr.Textbox(label="Generated Text", lines=10)
 
-        description_ui()
-
-        def create_caption(image):
-            if image is not None:
-                return florence_caption(image)
-            return ""
+        def create_caption(image):
+            if image is not None:
+                return florence_caption(image)
+            return ""
 
-        pg_create_caption_button.click(
-            create_caption,
-            inputs=[pg_input_image],
-            outputs=[pg_caption_output]
-        )
+        pg_create_caption_button.click(
+            create_caption,
+            inputs=[pg_input_image],
+            outputs=[pg_caption_output]
+        )
 
-        def generate_prompt_with_dynamic_seed(*args):
-            # Generate a new random seed
-            dynamic_seed = random.randint(0, 1000000)
-
-            # Call the generate_prompt function with the dynamic seed
-            result = prompt_generator.generate_prompt(dynamic_seed, *args)
-
-            # Return the result along with the used seed
-            return [dynamic_seed] + list(result)
-
-        pg_generate_button.click(
-            generate_prompt_with_dynamic_seed,
-            inputs=[pg_custom, pg_subject, pg_gender, pg_artform, pg_photo_type, pg_body_types, pg_default_tags, pg_roles, pg_hairstyles,
-                    pg_additional_details, pg_photography_styles, pg_device, pg_photographer, pg_artist, pg_digital_artform,
-                    pg_place, pg_lighting, pg_clothing, pg_composition, pg_pose, pg_background, pg_input_image],
-            outputs=[gr.Number(label="Used Seed", visible=False), pg_output, gr.Number(visible=False), pg_t5xxl_output, pg_clip_l_output, pg_clip_g_output]
-        ) #
-
-        pg_add_caption_button.click(
-            prompt_generator.add_caption_to_prompt,
-            inputs=[pg_output, pg_caption_output],
-            outputs=[pg_output]
-        )
+        def generate_prompt_with_dynamic_seed(*args):
+            # Generate a new random seed
+            dynamic_seed = random.randint(0, 1000000)
+
+            # Call the generate_prompt function with the dynamic seed
+            result = prompt_generator.generate_prompt(dynamic_seed, *args)
+
+            # Return the result along with the used seed
+            return [dynamic_seed] + list(result)
+
+        pg_generate_button.click(
+            generate_prompt_with_dynamic_seed,
+            inputs=[pg_custom, pg_subject, pg_gender, pg_artform, pg_photo_type, pg_body_types, pg_default_tags, pg_roles, pg_hairstyles,
+                    pg_additional_details, pg_photography_styles, pg_device, pg_photographer, pg_artist, pg_digital_artform,
+                    pg_place, pg_lighting, pg_clothing, pg_composition, pg_pose, pg_background, pg_input_image],
+            outputs=[gr.Number(label="Used Seed", visible=False), pg_output, gr.Number(visible=False), pg_t5xxl_output, pg_clip_l_output, pg_clip_g_output]
+        ) #
+
+        pg_add_caption_button.click(
+            prompt_generator.add_caption_to_prompt,
+            inputs=[pg_output, pg_caption_output],
+            outputs=[pg_output]
+        )
 
-        pg_generate_text_button.click(
-            huggingface_node.generate,
-            inputs=[pg_output, pg_happy_talk, pg_compress, pg_compression_level, pg_poster, pg_custom_base_prompt],
-            outputs=pg_text_output
-        )
+        pg_generate_text_button.click(
+            huggingface_node.generate,
+            inputs=[pg_output, pg_happy_talk, pg_compress, pg_compression_level, pg_poster, pg_custom_base_prompt],
+            outputs=pg_text_output
+        )
 
-        def update_all_options(choice):
-            updates = {}
-            if choice == "Disabled":
-                for dropdown in [
-                    pg_artform, pg_photo_type, pg_body_types, pg_default_tags, pg_roles, pg_hairstyles, pg_clothing,
-                    pg_place, pg_lighting, pg_composition, pg_pose, pg_background, pg_additional_details,
-                    pg_photography_styles, pg_device, pg_photographer, pg_artist, pg_digital_artform
-                ]:
-                    updates[dropdown] = gr.update(value="disabled")
-            elif choice == "Random":
-                for dropdown in [
+        def update_all_options(choice):
+            updates = {}
+            if choice == "Disabled":
+                for dropdown in [
+                    pg_artform, pg_photo_type, pg_body_types, pg_default_tags, pg_roles, pg_hairstyles, pg_clothing,
+                    pg_place, pg_lighting, pg_composition, pg_pose, pg_background, pg_additional_details,
+                    pg_photography_styles, pg_device, pg_photographer, pg_artist, pg_digital_artform
+                ]:
+                    updates[dropdown] = gr.update(value="disabled")
+            elif choice == "Random":
+                for dropdown in [
+                    pg_artform, pg_photo_type, pg_body_types, pg_default_tags, pg_roles, pg_hairstyles, pg_clothing,
+                    pg_place, pg_lighting, pg_composition, pg_pose, pg_background, pg_additional_details,
+                    pg_photography_styles, pg_device, pg_photographer, pg_artist, pg_digital_artform
+                ]:
+                    updates[dropdown] = gr.update(value="random")
+            else: # No Figure Random
+                for dropdown in [pg_photo_type, pg_body_types, pg_default_tags, pg_roles, pg_hairstyles, pg_clothing, pg_pose, pg_additional_details]:
+                    updates[dropdown] = gr.update(value="disabled")
+                for dropdown in [pg_artform, pg_place, pg_lighting, pg_composition, pg_background, pg_photography_styles, pg_device, pg_photographer, pg_artist, pg_digital_artform]:
+                    updates[dropdown] = gr.update(value="random")
+            return updates
+
+        pg_global_option.change(
+            update_all_options,
+            inputs=[pg_global_option],
+            outputs=[
                     pg_artform, pg_photo_type, pg_body_types, pg_default_tags, pg_roles, pg_hairstyles, pg_clothing,
                     pg_place, pg_lighting, pg_composition, pg_pose, pg_background, pg_additional_details,
                     pg_photography_styles, pg_device, pg_photographer, pg_artist, pg_digital_artform
-                ]:
-                    updates[dropdown] = gr.update(value="random")
-            else: # No Figure Random
-                for dropdown in [pg_photo_type, pg_body_types, pg_default_tags, pg_roles, pg_hairstyles, pg_clothing, pg_pose, pg_additional_details]:
-                    updates[dropdown] = gr.update(value="disabled")
-                for dropdown in [pg_artform, pg_place, pg_lighting, pg_composition, pg_background, pg_photography_styles, pg_device, pg_photographer, pg_artist, pg_digital_artform]:
-                    updates[dropdown] = gr.update(value="random")
-            return updates
-
-        pg_global_option.change(
-            update_all_options,
-            inputs=[pg_global_option],
-            outputs=[
-                pg_artform, pg_photo_type, pg_body_types, pg_default_tags, pg_roles, pg_hairstyles, pg_clothing,
-                pg_place, pg_lighting, pg_composition, pg_pose, pg_background, pg_additional_details,
-                pg_photography_styles, pg_device, pg_photographer, pg_artist, pg_digital_artform
-            ]
-        )
+            ]
+        )
 
+    description_ui()
     gr.LoginButton()
     gr.DuplicateButton(value="Duplicate Space for private use (This demo does not work on CPU. Requires GPU Space)")
 
+
 app.queue()
 app.launch()
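
Note: a minimal, self-contained sketch of how the reworked prompt assembly above behaves, assuming the model_trigger table that env.py introduces below; mash_prompt is a hypothetical name that condenses the run_lora logic for illustration only:

    model_trigger = {"Raelina/Raemu-Flux": "anime"}  # excerpt from env.py below

    def get_model_trigger(model_name: str) -> str:
        # As in mod.py: returns ", <trigger>" for known models, "" otherwise.
        return ", " + model_trigger[model_name] if model_name in model_trigger else ""

    def mash_prompt(prompt: str, last_model: str, trigger_word: str = "", position: str = "prepend") -> str:
        # The model trigger is appended first; the LoRA trigger word is then
        # prepended/appended to the running prompt_mash rather than to the raw
        # prompt, so the model trigger is no longer dropped (the point of the
        # f"{trigger_word} {prompt_mash}" hunks above).
        prompt_mash = prompt + get_model_trigger(last_model)
        if trigger_word:
            prompt_mash = f"{trigger_word} {prompt_mash}" if position == "prepend" else f"{prompt_mash} {trigger_word}"
        return prompt_mash

    print(mash_prompt("1girl, cityscape", "Raelina/Raemu-Flux", "sparkle"))
    # -> sparkle 1girl, cityscape, anime
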
env.py CHANGED
@@ -6,6 +6,61 @@ hf_token = os.environ.get("HF_TOKEN")
 hf_read_token = os.environ.get('HF_READ_TOKEN') # only use for private repo
 
 
+num_loras = 3
+num_cns = 2
+
+
+models = [
+    "camenduru/FLUX.1-dev-diffusers",
+    "black-forest-labs/FLUX.1-schnell",
+    "sayakpaul/FLUX.1-merged",
+    "ostris/OpenFLUX.1",
+    "John6666/flux-dev2pro-bf16-flux",
+    "John6666/flux1-dev-minus-v1-fp8-flux",
+    "John6666/hyper-flux1-dev-fp8-flux",
+    "John6666/blue-pencil-flux1-v010-fp8-flux",
+    "Raelina/Raemu-Flux",
+    "John6666/raemu-flux-v10-fp8-flux",
+    "John6666/copycat-flux-test-fp8-v11-fp8-flux",
+    "John6666/wai-ani-flux-v10forfp8-fp8-flux",
+    "John6666/flux-dev8-anime-nsfw-fp8-flux",
+    "John6666/nepotism-fuxdevschnell-v3aio-fp8-flux",
+    "John6666/sumeshi-flux1s-v002e-fp8-flux",
+    "John6666/fca-style-v33-x10-8step-fp8-flux",
+    "John6666/lyh-anime-v10f1-fp8-flux",
+    "John6666/lyh-dalle-anime-v12dalle-fp8-flux",
+    "John6666/lyh-anime-flux-v2a1-fp8-flux",
+    "John6666/glimmerkin-flux-cute-v10-fp8-flux",
+    "John6666/niji-style-flux-devfp8-fp8-flux",
+    "John6666/niji56-style-v3-fp8-flux",
+    "John6666/xe-anime-flux-v04-fp8-flux",
+    "John6666/xe-figure-flux-01-fp8-flux",
+    "John6666/xe-pixel-flux-01-fp8-flux",
+    "John6666/xe-guoman-flux-02-fp8-flux",
+    "John6666/carnival-unchained-v10-fp8-flux",
+    "John6666/fluxunchained-artfulnsfw-fut516xfp8e4m3fnv11-fp8-flux",
+    "John6666/fastflux-unchained-t5f16-fp8-flux",
+    "John6666/iniverse-mix-xl-sfwnsfw-fluxdfp16nsfwv11-fp8-flux",
+    "John6666/nsfw-master-flux-lora-merged-with-flux1-dev-fp16-v10-fp8-flux",
+    "John6666/the-araminta-flux1a1-fp8-flux",
+    "John6666/acorn-is-spinning-flux-v11-fp8-flux",
+    "John6666/real-horny-pro-fp8-flux",
+    "John6666/centerfold-flux-v20fp8e5m2-fp8-flux",
+    "John6666/jib-mix-flux-v208stephyper-fp8-flux",
+    "John6666/sapianf-nude-men-women-for-flux-v20fp16-fp8-flux",
+    "John6666/flux-asian-realistic-v10-fp8-flux",
+    "John6666/fluxasiandoll-v10-fp8-flux",
+    "John6666/xe-asian-flux-01-fp8-flux",
+    "John6666/fluxescore-dev-v10fp16-fp8-flux",
+    # "",
+]
+
+model_trigger = {
+    "Raelina/Raemu-Flux": "anime",
+    "John6666/raemu-flux-v10-fp8-flux": "anime",
+    "John6666/fca-style-v33-x10-8step-fp8-flux": "fca_style",
+}
+
 # List all Models for specified user
 HF_MODEL_USER_LIKES = [] # sorted by number of likes
 HF_MODEL_USER_EX = [] # sorted by a special rule
@@ -37,4 +92,3 @@ HF_LORA_PRIVATE_REPOS2 = [] # to be sorted as 1 repo
 HF_LORA_PRIVATE_REPOS = HF_LORA_PRIVATE_REPOS1 + HF_LORA_PRIVATE_REPOS2
 HF_LORA_ESSENTIAL_PRIVATE_REPO = '' # to be downloaded on run app
 HF_VAE_PRIVATE_REPO = ''
-
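
The new model_trigger table is consumed by get_model_trigger in mod.py (next file). An equivalent dict.get formulation, shown only as a comparison sketch, not what the commit ships:

    model_trigger = {
        "Raelina/Raemu-Flux": "anime",
        "John6666/raemu-flux-v10-fp8-flux": "anime",
        "John6666/fca-style-v33-x10-8step-fp8-flux": "fca_style",
    }

    # Single lookup via dict.get instead of "in model_trigger.keys()" plus indexing.
    def get_model_trigger(model_name: str) -> str:
        trigger = model_trigger.get(model_name, "")
        return f", {trigger}" if trigger else ""

    assert get_model_trigger("Raelina/Raemu-Flux") == ", anime"
    assert get_model_trigger("unknown/model") == ""
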
 
mod.py CHANGED
@@ -5,6 +5,7 @@ from PIL import Image
 from pathlib import Path
 import gc
 import subprocess
+from env import num_cns, model_trigger
 
 
 subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
@@ -13,54 +14,6 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
 torch.set_grad_enabled(False)
 
 
-models = [
-    "camenduru/FLUX.1-dev-diffusers",
-    "black-forest-labs/FLUX.1-schnell",
-    "sayakpaul/FLUX.1-merged",
-    "ostris/OpenFLUX.1",
-    "John6666/flux-dev2pro-bf16-flux",
-    "John6666/flux1-dev-minus-v1-fp8-flux",
-    "John6666/hyper-flux1-dev-fp8-flux",
-    "John6666/blue-pencil-flux1-v010-fp8-flux",
-    "Raelina/Raemu-Flux",
-    "John6666/raemu-flux-v10-fp8-flux",
-    "John6666/copycat-flux-test-fp8-v11-fp8-flux",
-    "John6666/wai-ani-flux-v10forfp8-fp8-flux",
-    "John6666/flux-dev8-anime-nsfw-fp8-flux",
-    "John6666/nepotism-fuxdevschnell-v3aio-fp8-flux",
-    "John6666/sumeshi-flux1s-v002e-fp8-flux",
-    "John6666/fca-style-v33-x10-8step-fp8-flux",
-    "John6666/lyh-anime-v10f1-fp8-flux",
-    "John6666/lyh-dalle-anime-v12dalle-fp8-flux",
-    "John6666/lyh-anime-flux-v2a1-fp8-flux",
-    "John6666/glimmerkin-flux-cute-v10-fp8-flux",
-    "John6666/niji-style-flux-devfp8-fp8-flux",
-    "John6666/niji56-style-v3-fp8-flux",
-    "John6666/xe-anime-flux-v04-fp8-flux",
-    "John6666/xe-figure-flux-01-fp8-flux",
-    "John6666/xe-pixel-flux-01-fp8-flux",
-    "John6666/xe-guoman-flux-02-fp8-flux",
-    "John6666/carnival-unchained-v10-fp8-flux",
-    "John6666/fluxunchained-artfulnsfw-fut516xfp8e4m3fnv11-fp8-flux",
-    "John6666/fastflux-unchained-t5f16-fp8-flux",
-    "John6666/iniverse-mix-xl-sfwnsfw-fluxdfp16nsfwv11-fp8-flux",
-    "John6666/nsfw-master-flux-lora-merged-with-flux1-dev-fp16-v10-fp8-flux",
-    "John6666/the-araminta-flux1a1-fp8-flux",
-    "John6666/acorn-is-spinning-flux-v11-fp8-flux",
-    "John6666/real-horny-pro-fp8-flux",
-    "John6666/centerfold-flux-v20fp8e5m2-fp8-flux",
-    "John6666/jib-mix-flux-v208stephyper-fp8-flux",
-    "John6666/sapianf-nude-men-women-for-flux-v20fp16-fp8-flux",
-    "John6666/flux-asian-realistic-v10-fp8-flux",
-    "John6666/fluxasiandoll-v10-fp8-flux",
-    "John6666/xe-asian-flux-01-fp8-flux",
-    "John6666/fluxescore-dev-v10fp16-fp8-flux",
-    # "",
-]
-
-
-num_loras = 3
-num_cns = 2
 control_images = [None] * num_cns
 control_modes = [-1] * num_cns
 control_scales = [0] * num_cns
@@ -309,6 +262,12 @@ def get_trigger_word(lorajson: list[dict]):
     return trigger
 
 
+def get_model_trigger(model_name: str):
+    trigger = ""
+    if model_name in model_trigger.keys(): trigger += ", " + model_trigger[model_name]
+    return trigger
+
+
 # https://huggingface.co/docs/diffusers/v0.23.1/en/api/loaders#diffusers.loaders.LoraLoaderMixin.fuse_lora
 # https://github.com/huggingface/diffusers/issues/4919
 def fuse_loras(pipe, lorajson: list[dict]):
@@ -374,6 +333,27 @@ def enhance_prompt(input_prompt):
     return enhanced_text
 
 
+def save_image(image, savefile, modelname, prompt, height, width, steps, cfg, seed):
+    import uuid
+    from PIL import Image, PngImagePlugin
+    import json
+    try:
+        if savefile is None: savefile = f"{modelname.split('/')[-1]}_{str(uuid.uuid4())}.png"
+        metadata = {"prompt": prompt, "Model": {"Model": modelname.split("/")[-1]}}
+        metadata["num_inference_steps"] = steps
+        metadata["guidance_scale"] = cfg
+        metadata["seed"] = seed
+        metadata["resolution"] = f"{width} x {height}"
+        metadata_str = json.dumps(metadata)
+        info = PngImagePlugin.PngInfo()
+        info.add_text("metadata", metadata_str)
+        image.save(savefile, "PNG", pnginfo=info)
+        return str(Path(savefile).resolve())
+    except Exception as e:
+        print(f"Failed to save image file: {e}")
+        raise Exception(f"Failed to save image file:") from e
+
+
 load_prompt_enhancer.zerogpu = True
 fuse_loras.zerogpu = True
 preprocess_image.zerogpu = True
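
Since save_image serializes the generation parameters to JSON and stores them in a PNG text chunk named "metadata", they can be read back later with Pillow; a small sketch (the file name here is hypothetical):

    import json
    from PIL import Image

    # Pillow exposes PNG text chunks through Image.info.
    with Image.open("flux1-dev_1234.png") as im:  # hypothetical save_image output
        meta = json.loads(im.info["metadata"])

    print(meta["prompt"], meta["seed"], meta["resolution"])
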
requirements.txt CHANGED
@@ -15,4 +15,5 @@ opencv-python
 deepspeed
 mediapipe
 openai==1.37.0
-translatepy
+translatepy
+xformers