John6666 committed
Commit c9302f4
1 Parent(s): b8d4104

Upload 5 files

Files changed (4):
  1. app.py +112 -24
  2. loras.json +34 -9
  3. mod.py +12 -0
  4. requirements.txt +1 -1
app.py CHANGED
@@ -2,16 +2,17 @@ import spaces
 import gradio as gr
 import json
 import torch
-from diffusers import DiffusionPipeline, AutoencoderTiny, AutoencoderKL
+from diffusers import DiffusionPipeline, AutoencoderTiny, AutoencoderKL, AutoPipelineForImage2Image
 from live_preview_helpers import flux_pipe_call_that_returns_an_iterable_of_images
-from diffusers import FluxControlNetPipeline, FluxControlNetModel, FluxMultiControlNetModel
+from diffusers.utils import load_image
+from diffusers import FluxControlNetPipeline, FluxControlNetModel, FluxMultiControlNetModel, FluxControlNetImg2ImgPipeline
 from huggingface_hub import HfFileSystem, ModelCard
 import random
 import time
 
 from env import models, num_loras, num_cns
 from mod import (clear_cache, get_repo_safetensors, is_repo_name, is_repo_exists, get_model_trigger,
-                 description_ui, compose_lora_json, is_valid_lora, fuse_loras, save_image,
+                 description_ui, compose_lora_json, is_valid_lora, fuse_loras, save_image, preprocess_i2i_image,
                  get_trigger_word, enhance_prompt, deselect_lora, set_control_union_image,
                  get_control_union_mode, set_control_union_mode, get_control_params, translate_to_en)
 from flux import (search_civitai_lora, select_civitai_lora, search_civitai_lora_json,
@@ -34,6 +35,8 @@ dtype = torch.bfloat16
 taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype)
 good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype)
 pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=taef1)
+pipe_i2i = AutoPipelineForImage2Image.from_pretrained(base_model, vae=good_vae, transformer=pipe.transformer, text_encoder=pipe.text_encoder,
+                                                      tokenizer=pipe.tokenizer, text_encoder_2=pipe.text_encoder_2, tokenizer_2=pipe.tokenizer_2, torch_dtype=dtype)
 controlnet_union = None
 controlnet = None
 last_model = models[0]
@@ -45,9 +48,11 @@ MAX_SEED = 2**32-1
 
 # https://huggingface.co/InstantX/FLUX.1-dev-Controlnet-Union
 # https://huggingface.co/spaces/jiuface/FLUX.1-dev-Controlnet-Union
+# https://huggingface.co/docs/diffusers/main/en/api/pipelines/flux
 #@spaces.GPU()
-def change_base_model(repo_id: str, cn_on: bool, progress=gr.Progress(track_tqdm=True)): # , progress=gr.Progress(track_tqdm=True) # gradio.exceptions.Error: 'Model load Error: too many values to unpack (expected 2)'
+def change_base_model(repo_id: str, cn_on: bool, disable_model_cache: bool, progress=gr.Progress(track_tqdm=True)):
     global pipe
+    global pipe_i2i
     global taef1
     global good_vae
     global controlnet_union
@@ -56,8 +61,9 @@ def change_base_model(repo_id: str, cn_on: bool, progress=gr.Progress(track_tqdm
     global last_cn_on
     global dtype
     try:
-        if (repo_id == last_model and cn_on is last_cn_on) or not is_repo_name(repo_id) or not is_repo_exists(repo_id): return gr.update(visible=True)
+        if not disable_model_cache and (repo_id == last_model and cn_on is last_cn_on) or not is_repo_name(repo_id) or not is_repo_exists(repo_id): return gr.update(visible=True)
         pipe.to("cpu")
+        pipe_i2i.to("cpu")
        good_vae.to("cpu")
        taef1.to("cpu")
        if controlnet is not None: controlnet.to("cpu")
@@ -69,6 +75,8 @@ def change_base_model(repo_id: str, cn_on: bool, progress=gr.Progress(track_tqdm
             controlnet_union = FluxControlNetModel.from_pretrained(controlnet_model_union_repo, torch_dtype=dtype)
             controlnet = FluxMultiControlNetModel([controlnet_union])
             pipe = FluxControlNetPipeline.from_pretrained(repo_id, controlnet=controlnet, torch_dtype=dtype)
+            pipe_i2i = FluxControlNetImg2ImgPipeline.from_pretrained(repo_id, controlnet=controlnet, vae=None, transformer=pipe.transformer, text_encoder=pipe.text_encoder,
+                                                                     tokenizer=pipe.tokenizer, text_encoder_2=pipe.text_encoder_2, tokenizer_2=pipe.tokenizer_2, torch_dtype=dtype)
             last_model = repo_id
             last_cn_on = cn_on
             progress(1, desc=f"Model loaded: {repo_id} / ControlNet Loaded: {controlnet_model_union_repo}")
@@ -77,6 +85,8 @@ def change_base_model(repo_id: str, cn_on: bool, progress=gr.Progress(track_tqdm
             progress(0, desc=f"Loading model: {repo_id}")
             print(f"Loading model: {repo_id}")
             pipe = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=dtype)
+            pipe_i2i = AutoPipelineForImage2Image.from_pretrained(repo_id, vae=None, transformer=pipe.transformer, text_encoder=pipe.text_encoder,
+                                                                  tokenizer=pipe.tokenizer, text_encoder_2=pipe.text_encoder_2, tokenizer_2=pipe.tokenizer_2, torch_dtype=dtype)
             last_model = repo_id
             last_cn_on = cn_on
             progress(1, desc=f"Model loaded: {repo_id}")
@@ -183,7 +193,67 @@ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scal
         print(e)
         raise gr.Error(f"Inference Error: {e}") from e
 
-def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height,
+@spaces.GPU(duration=70)
+def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps, cfg_scale, width, height, lora_scale, seed, cn_on, progress=gr.Progress(track_tqdm=True)):
+    global pipe_i2i
+    global good_vae
+    global controlnet
+    global controlnet_union
+    try:
+        good_vae.to("cuda")
+        generator = torch.Generator(device="cuda").manual_seed(int(float(seed)))
+        image_input = load_image(image_input_path)
+
+        with calculateDuration("Generating image"):
+            # Generate image
+            modes, images, scales = get_control_params()
+            if True or not cn_on or len(modes) == 0:
+                pipe_i2i.to("cuda")
+                pipe_i2i.vae = good_vae
+                image_input = load_image(image_input_path)
+                progress(0, desc="Start I2I Inference.")
+                final_image = pipe_i2i(
+                    prompt=prompt_mash,
+                    image=image_input,
+                    strength=image_strength,
+                    num_inference_steps=steps,
+                    guidance_scale=cfg_scale,
+                    width=width,
+                    height=height,
+                    generator=generator,
+                    joint_attention_kwargs={"scale": lora_scale},
+                    output_type="pil",
+                ).images[0]
+                return final_image
+            else: # omitted
+                pipe_i2i.to("cuda")
+                pipe_i2i.vae = good_vae
+                image_input = load_image(image_input_path)
+                if controlnet_union is not None: controlnet_union.to("cuda")
+                if controlnet is not None: controlnet.to("cuda")
+                pipe_i2i.enable_model_cpu_offload()
+                progress(0, desc="Start I2I Inference with ControlNet.")
+                final_image = pipe_i2i(
+                    prompt=prompt_mash,
+                    control_image=images,
+                    control_mode=modes,
+                    image=image_input,
+                    strength=image_strength,
+                    num_inference_steps=steps,
+                    guidance_scale=cfg_scale,
+                    width=width,
+                    height=height,
+                    controlnet_conditioning_scale=scales,
+                    generator=generator,
+                    joint_attention_kwargs={"scale": lora_scale},
+                    output_type="pil",
+                ).images[0]
+                return final_image
+    except Exception as e:
+        print(e)
+        raise gr.Error(f"I2I Inference Error: {e}") from e
+
+def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_index, randomize_seed, seed, width, height,
              lora_scale, lora_json, cn_on, translate_on, progress=gr.Progress(track_tqdm=True)):
     global pipe
     if selected_index is None and not is_valid_lora(lora_json):
@@ -195,6 +265,8 @@ def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, wid
     try:
         pipe.unfuse_lora()
         pipe.unload_lora_weights()
+        pipe_i2i.unfuse_lora()
+        pipe_i2i.unload_lora_weights()
     except Exception as e:
         print(e)
 
@@ -225,10 +297,16 @@ def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, wid
         prompt_mash = prompt_mash
     # Load LoRA weights
     with calculateDuration(f"Loading LoRA weights for {selected_lora['title']}"):
-        if "weights" in selected_lora:
-            pipe.load_lora_weights(lora_path, weight_name=selected_lora["weights"])
+        if(image_input is not None):
+            if "weights" in selected_lora:
+                pipe_i2i.load_lora_weights(lora_path, weight_name=selected_lora["weights"])
+            else:
+                pipe_i2i.load_lora_weights(lora_path)
         else:
-            pipe.load_lora_weights(lora_path)
+            if "weights" in selected_lora:
+                pipe.load_lora_weights(lora_path, weight_name=selected_lora["weights"])
+            else:
+                pipe.load_lora_weights(lora_path)
 
     # Set random seed for reproducibility
     with calculateDuration("Randomizing seed"):
@@ -236,17 +314,20 @@ def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, wid
             seed = random.randint(0, MAX_SEED)
 
     progress(0, desc="Running Inference.")
-    image_generator = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, cn_on, progress)
-    # Consume the generator to get the final image
-    final_image = None
-    step_counter = 0
-    for image in image_generator:
-        step_counter+=1
-        final_image = image
-        progress_bar = f'<div class="progress-container"><div class="progress-bar" style="--current: {step_counter}; --total: {steps};"></div></div>'
-        yield image, seed, gr.update(value=progress_bar, visible=True)
-
-    yield save_image(final_image, None, last_model, prompt_mash, height, width, steps, cfg_scale, seed), seed, gr.update(value=progress_bar, visible=False)
+    if(image_input is not None):
+        final_image = generate_image_to_image(prompt_mash, image_input, image_strength, steps, cfg_scale, width, height, lora_scale, seed, cn_on, progress)
+        yield save_image(final_image, None, last_model, prompt_mash, height, width, steps, cfg_scale, seed), seed, gr.update(visible=False)
+    else:
+        image_generator = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, cn_on, progress)
+        # Consume the generator to get the final image
+        final_image = None
+        step_counter = 0
+        for image in image_generator:
+            step_counter+=1
+            final_image = image
+            progress_bar = f'<div class="progress-container"><div class="progress-bar" style="--current: {step_counter}; --total: {steps};"></div></div>'
+            yield image, seed, gr.update(value=progress_bar, visible=True)
+        yield save_image(final_image, None, last_model, prompt_mash, height, width, steps, cfg_scale, seed), seed, gr.update(value=progress_bar, visible=False)
 
 def get_huggingface_safetensors(link):
     split_link = link.split("/")
@@ -392,8 +473,14 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=css, delete_cache
             model_info = gr.Markdown(elem_classes="info")
         with gr.Row():
             with gr.Accordion("Advanced Settings", open=False):
+                with gr.Row():
+                    input_image = gr.Image(label="Input image", type="filepath", height=256, sources=["upload", "clipboard"], show_share_button=False)
+                    with gr.Column():
+                        image_strength = gr.Slider(label="Image Strength", minimum=0.1, maximum=1.0, step=0.01, value=0.75)
+                        input_image_preprocess = gr.Checkbox(True, label="Preprocess Input image")
                 with gr.Column():
                     with gr.Row():
+                        lora_scale = gr.Slider(label="LoRA Scale", minimum=-3, maximum=3, step=0.01, value=0.95)
                         width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=1024)
                         height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=1024)
                     with gr.Row():
@@ -402,7 +489,7 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=css, delete_cache
                    with gr.Row():
                         randomize_seed = gr.Checkbox(True, label="Randomize seed")
                         seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True)
-                        lora_scale = gr.Slider(label="LoRA Scale", minimum=-3, maximum=3, step=0.01, value=0.95)
+                        disable_model_cache = gr.Checkbox(False, label="Disable model caching")
             with gr.Accordion("External LoRA", open=True):
                 with gr.Column():
                     lora_repo_json = gr.JSON(value=[{}] * num_loras, visible=False)
@@ -486,20 +573,21 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=css, delete_cache
     gr.on(
         triggers=[generate_button.click, prompt.submit],
         fn=change_base_model,
-        inputs=[model_name, cn_on],
+        inputs=[model_name, cn_on, disable_model_cache],
         outputs=[result],
         queue=True,
         show_api=False,
         trigger_mode="once",
     ).success(
         fn=run_lora,
-        inputs=[prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height,
+        inputs=[prompt, input_image, image_strength, cfg_scale, steps, selected_index, randomize_seed, seed, width, height,
                 lora_scale, lora_repo_json, cn_on, auto_trans],
        outputs=[result, seed, progress_bar],
        queue=True,
        show_api=True,
    )
 
+    input_image.upload(preprocess_i2i_image, [input_image, input_image_preprocess, height, width], [input_image], queue=False, show_api=False)
     deselect_lora_button.click(deselect_lora, None, [prompt, selected_info, selected_index, width, height], queue=False, show_api=False)
     gr.on(
         triggers=[model_name.change, cn_on.change],
@@ -509,7 +597,7 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=css, delete_cache
         queue=False,
         show_api=False,
         trigger_mode="once",
-    ).then(change_base_model, [model_name, cn_on], [result], queue=True, show_api=False)
+    ).then(change_base_model, [model_name, cn_on, disable_model_cache], [result], queue=True, show_api=False)
     prompt_enhance.click(enhance_prompt, [prompt], [prompt], queue=False, show_api=False)
 
     gr.on(
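Editor's note: the central technique in this app.py change is component sharing. Rather than loading a second full FLUX checkpoint for image-to-image, the new pipe_i2i is constructed around the modules the text-to-image pipe already holds (transformer, both text encoders, both tokenizers), so the extra pipeline costs essentially no additional weight memory. The sketch below reproduces that pattern in isolation; it is a minimal sketch, not the Space's code, and it assumes access to the gated black-forest-labs/FLUX.1-dev checkpoint, a CUDA device, and an input.png on disk, while omitting the Space's ControlNet branches, offloading, and model caching.

import torch
from diffusers import DiffusionPipeline, AutoPipelineForImage2Image
from diffusers.utils import load_image

# The text-to-image pipeline owns the heavy weights.
pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)

# The image-to-image pipeline reuses those modules instead of loading a
# second copy: only the pipeline wrapper is new, not the parameters.
pipe_i2i = AutoPipelineForImage2Image.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    transformer=pipe.transformer,
    text_encoder=pipe.text_encoder,
    tokenizer=pipe.tokenizer,
    text_encoder_2=pipe.text_encoder_2,
    tokenizer_2=pipe.tokenizer_2,
    vae=pipe.vae,
    torch_dtype=torch.bfloat16,
).to("cuda")

init = load_image("input.png")  # hypothetical input file
out = pipe_i2i(prompt="a polaroid photo of a cat", image=init, strength=0.75,
               num_inference_steps=28, guidance_scale=3.5).images[0]
out.save("output.png")

Recent diffusers releases can also derive the second pipeline with AutoPipelineForImage2Image.from_pipe(pipe), which shares components the same way in a single call.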
loras.json CHANGED
@@ -48,12 +48,30 @@
     "trigger_word": "in the style of TOK a trtcrd, tarot style",
     "aspect": "portrait"
   },
+  {
+    "repo": "alvdansen/pola-photo-flux",
+    "image": "https://huggingface.co/alvdansen/pola-photo-flux/resolve/main/images/out-2%20(83).webp",
+    "trigger_word": ", polaroid style",
+    "title": "Polaroid Style"
+  },
+  {
+    "image": "https://huggingface.co/dvyio/flux-lora-the-sims/resolve/main/images/dunBAVBsALOepaE_dsWFI_6b0fef6b0fc4472aa07d00edea7c75b3.jpg",
+    "repo": "dvyio/flux-lora-the-sims",
+    "trigger_word": ", video game screenshot in the style of THSMS",
+    "title": "The Sims style"
+  },
   {
     "image": "https://huggingface.co/alvdansen/softpasty-flux-dev/resolve/main/images/ComfyUI_00814_%20(2).png",
     "title": "SoftPasty",
     "repo": "alvdansen/softpasty-flux-dev",
     "trigger_word": "araminta_illus illustration style"
   },
+  {
+    "image": "https://huggingface.co/dvyio/flux-lora-film-noir/resolve/main/images/S8iWMa0GamEcFkanHHmI8_a232d8b83bb043808742d661dac257f7.jpg",
+    "title": "Film Noir",
+    "repo": "dvyio/flux-lora-film-noir",
+    "trigger_word": "in the style of FLMNR"
+  },
   {
     "image": "https://huggingface.co/AIWarper/RubberCore1920sCartoonStyle/resolve/main/images/Rub_00006_.png",
     "title": "1920s cartoon",
@@ -61,6 +79,13 @@
     "trigger_word": "RU883R style",
     "trigger_position": "prepend"
   },
+  {
+    "image": "https://huggingface.co/Norod78/JojosoStyle-flux-lora/resolve/main/samples/1725244218477__000004255_1.jpg",
+    "title": "JoJo Style",
+    "repo": "Norod78/JojosoStyle-flux-lora",
+    "trigger_word": "JojosoStyle",
+    "trigger_position": "prepend"
+  },
   {
     "image": "https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/picture-6-rev1.png?raw=true",
     "title": "flux-Realism",
@@ -81,6 +106,13 @@
     "trigger_word": "mgwr/cine",
     "trigger_position": "prepend"
   },
+  {
+    "image": "https://huggingface.co/sWizad/pokemon-trainer-sprites-pixelart-flux/resolve/main/26578915.jpeg",
+    "repo": "sWizad/pokemon-trainer-sprites-pixelart-flux",
+    "title": "Pokemon Trainer Sprites",
+    "trigger_word": "white background, a pixel image of",
+    "trigger_position": "prepend"
+  },
   {
     "image": "https://huggingface.co/nerijs/animation2k-flux/resolve/main/images/Q8-oVxNnXvZ9HNrgbNpGw_02762aaaba3b47859ee5fe9403a371e3.png",
     "title": "animation2k",
@@ -100,12 +132,6 @@
     "trigger_word": "ps1 game screenshot,",
     "trigger_position": "prepend"
   },
-  {
-    "image": "https://huggingface.co/Shakker-Labs/FLUX.1-dev-LoRA-blended-realistic-illustration/resolve/main/images/example3.png",
-    "title": "Blended Realistic Illustration",
-    "repo": "Shakker-Labs/FLUX.1-dev-LoRA-blended-realistic-illustration",
-    "trigger_word": "artistic style blends reality and illustration elements"
-  },
   {
     "image": "https://huggingface.co/alvdansen/flux-koda/resolve/main/images/ComfyUI_00566_%20(2).png",
     "title": "flux koda",
@@ -119,7 +145,7 @@
     "trigger_word": ""
   },
   {
-    "image": "https://pbs.twimg.com/media/GU7NsZPa8AA4Ddl?format=jpg&name=4096x4096",
+    "image": "https://huggingface.co/davisbro/half_illustration/resolve/main/images/example3.webp",
     "title": "Half Illustration",
     "repo": "davisbro/half_illustration",
     "trigger_word": "in the style of TOK"
@@ -171,8 +197,7 @@
   {
     "image": "https://huggingface.co/kudzueye/Boreal/resolve/main/images/ComfyUI_00845_.png",
     "title": "Boreal",
-    "repo": "kudzueye/Boreal",
-    "weights": "boreal-flux-dev-lora-v04_1000_steps.safetensors",
+    "repo": "kudzueye/boreal-flux-dev-v2",
     "trigger_word": "phone photo"
   },
   {
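Editor's note: every loras.json entry uses the same small schema: repo (Hub id), image (preview URL), title, trigger_word, plus optional trigger_position, weights, and aspect. The Space's real trigger handling lives in mod.py (get_trigger_word) and is not part of this diff; the standalone sketch below only illustrates, under that assumption, how a trigger_position of "prepend" versus the default append might be applied to a prompt.

# Minimal sketch of applying a loras.json entry's trigger word to a prompt.
def apply_trigger(prompt: str, entry: dict) -> str:
    word = entry.get("trigger_word", "")
    if not word:
        return prompt
    if entry.get("trigger_position") == "prepend":
        return f"{word} {prompt}"
    return f"{prompt} {word}"  # default: append

entry = {"repo": "dvyio/flux-lora-film-noir", "trigger_word": "in the style of FLMNR"}
print(apply_trigger("a portrait of a detective", entry))
# -> "a portrait of a detective in the style of FLMNR"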
mod.py CHANGED
@@ -239,6 +239,18 @@ def set_control_union_image(i: int, mode: str, image: Image.Image | None, height
     return control_images[i]
 
 
+def preprocess_i2i_image(image_path: str, is_preprocess: bool, height: int, width: int):
+    try:
+        if not is_preprocess: return image_path
+        image_resolution = max(width, height)
+        image = Image.open(image_path)
+        image_resized = resize_image(expand2square(image.convert("RGB")), image_resolution, image_resolution, False)
+        image_resized.save(image_path)
+    except Exception as e:
+        raise gr.Error(f"Error: {e}")
+    return image_path
+
+
 def compose_lora_json(lorajson: list[dict], i: int, name: str, scale: float, filename: str, trigger: str):
     lorajson[i]["name"] = str(name) if name != "None" else ""
     lorajson[i]["scale"] = float(scale)
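Editor's note: preprocess_i2i_image rewrites the uploaded file in place before inference. The helpers it calls (expand2square, resize_image) are defined elsewhere in mod.py and do not appear in this diff, so the sketch below is an assumed, self-contained approximation of that step: pad the image to a square canvas, then resize it to the generation resolution. It assumes an input.png on disk; expand2square here is our own illustrative version, not necessarily identical to the Space's.

from PIL import Image

def expand2square(im: Image.Image, fill=(0, 0, 0)) -> Image.Image:
    # Pad the shorter side so the image becomes square, centering the original.
    w, h = im.size
    if w == h:
        return im
    side = max(w, h)
    canvas = Image.new("RGB", (side, side), fill)
    canvas.paste(im, ((side - w) // 2, (side - h) // 2))
    return canvas

img = expand2square(Image.open("input.png").convert("RGB"))
img = img.resize((1024, 1024), Image.LANCZOS)  # target resolution = max(width, height)
img.save("input.png")  # overwrite in place, as the Space does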
requirements.txt CHANGED
@@ -1,6 +1,6 @@
 spaces
 torch
-git+https://github.com/huggingface/diffusers
+git+https://github.com/huggingface/diffusers@aa73072f1f7014635e3de916cbcf47858f4c37a0
 transformers
 peft
 sentencepiece