John6666 committed on
Commit
de8ac53
1 Parent(s): 0792a15

Upload 3 files

Browse files
Files changed (3) hide show
  1. app.py +145 -41
  2. loras.json +121 -115
  3. mod.py +1 -1
app.py CHANGED
@@ -5,9 +5,11 @@ import torch
5
  from PIL import Image
6
  import spaces
7
  from diffusers import DiffusionPipeline
 
8
  import copy
9
  import random
10
  import time
 
11
  from mod import (models, clear_cache, get_repo_safetensors, change_base_model,
12
  description_ui, num_loras, compose_lora_json, is_valid_lora, fuse_loras, get_trigger_word, pipe)
13
  from flux import (search_civitai_lora, select_civitai_lora, search_civitai_lora_json,
@@ -40,7 +42,6 @@ class calculateDuration:
40
  else:
41
  print(f"Elapsed time: {self.elapsed_time:.6f} seconds")
42
 
43
-
44
  def update_selection(evt: gr.SelectData, width, height):
45
  selected_lora = loras[evt.index]
46
  new_placeholder = f"Type a prompt for {selected_lora['title']}"
@@ -62,7 +63,7 @@ def update_selection(evt: gr.SelectData, width, height):
62
  )
63
 
64
  @spaces.GPU(duration=70)
65
- def generate_image(prompt, trigger_word, steps, seed, cfg_scale, width, height, lora_scale, progress):
66
  pipe.to("cuda")
67
  generator = torch.Generator(device="cuda").manual_seed(seed)
68
 
@@ -70,7 +71,7 @@ def generate_image(prompt, trigger_word, steps, seed, cfg_scale, width, height,
70
  with calculateDuration("Generating image"):
71
  # Generate image
72
  image = pipe(
73
- prompt=f"{prompt} {trigger_word}",
74
  num_inference_steps=steps,
75
  guidance_scale=cfg_scale,
76
  width=width,
@@ -87,15 +88,26 @@ def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, wid
87
  # raise gr.Error("You must select a LoRA before proceeding.")
88
  progress(0, desc="Preparing Inference.")
89
 
90
- trigger_word = ""
91
  if is_valid_lora(lora_json):
92
  with calculateDuration("Loading LoRA weights"):
93
  fuse_loras(pipe, lora_json)
94
  trigger_word = get_trigger_word(lora_json)
 
95
  if selected_index is not None:
96
  selected_lora = loras[selected_index]
97
  lora_path = selected_lora["repo"]
98
  trigger_word = selected_lora["trigger_word"]
 
 
 
 
 
 
 
 
 
 
99
  # Load LoRA weights
100
  with calculateDuration(f"Loading LoRA weights for {selected_lora['title']}"):
101
  if "weights" in selected_lora:
@@ -110,7 +122,7 @@ def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, wid
110
 
111
  progress(1, desc="Preparing Inference.")
112
 
113
- image = generate_image(prompt, trigger_word, steps, seed, cfg_scale, width, height, lora_scale, progress)
114
  if is_valid_lora(lora_json):
115
  pipe.unfuse_lora()
116
  pipe.unload_lora_weights()
@@ -119,6 +131,81 @@ def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, wid
119
  clear_cache()
120
  return image, seed
121
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
122
  run_lora.zerogpu = True
123
 
124
  css = '''
@@ -127,8 +214,12 @@ css = '''
127
  #title h1{font-size: 3em; display:inline-flex; align-items:center}
128
  #title img{width: 100px; margin-right: 0.5em}
129
  #gallery .grid-wrap{height: 10vh}
 
 
 
 
130
  '''
131
- with gr.Blocks(theme=gr.themes.Soft(), fill_width=True, css=css) as app:
132
  with gr.Tab("FLUX LoRA the Explorer"):
133
  title = gr.HTML(
134
  """<h1><img src="https://huggingface.co/spaces/multimodalart/flux-lora-the-explorer/resolve/main/flux_lora.png" alt="LoRA">FLUX LoRA the Explorer Mod</h1>""",
@@ -162,7 +253,11 @@ with gr.Blocks(theme=gr.themes.Soft(), fill_width=True, css=css) as app:
162
  columns=3,
163
  elem_id="gallery"
164
  )
165
-
 
 
 
 
166
  with gr.Column(scale=4):
167
  result = gr.Image(label="Generated Image")
168
 
@@ -185,48 +280,57 @@ with gr.Blocks(theme=gr.themes.Soft(), fill_width=True, css=css) as app:
185
  seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True)
186
  lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=1, step=0.01, value=0.95)
187
 
188
- with gr.Column():
189
- lora_repo_json = gr.JSON(value=[{}] * num_loras, visible=False)
190
- lora_repo = [None] * num_loras
191
- lora_weights = [None] * num_loras
192
- lora_trigger = [None] * num_loras
193
- lora_wt = [None] * num_loras
194
- lora_info = [None] * num_loras
195
- lora_copy = [None] * num_loras
196
- lora_md = [None] * num_loras
197
- lora_num = [None] * num_loras
198
- for i in range(num_loras):
199
- with gr.Group():
 
 
 
 
 
 
 
 
 
 
 
 
200
  with gr.Row():
201
- lora_repo[i] = gr.Dropdown(label=f"LoRA {int(i+1)} Repo", choices=get_all_lora_tupled_list(), info="Input LoRA Repo ID", value="", allow_custom_value=True)
202
- lora_weights[i] = gr.Dropdown(label=f"LoRA {int(i+1)} Filename", choices=[], info="Optional", value="", allow_custom_value=True)
203
- lora_trigger[i] = gr.Textbox(label=f"LoRA {int(i+1)} Trigger Prompt", lines=1, max_lines=4, value="")
204
- lora_wt[i] = gr.Slider(label=f"LoRA {int(i+1)} Scale", minimum=-2, maximum=2, step=0.01, value=1.00)
 
 
 
205
  with gr.Row():
206
- lora_info[i] = gr.Textbox(label="", info="Example of prompt:", value="", show_copy_button=True, interactive=False, visible=False)
207
- lora_copy[i] = gr.Button(value="Copy example to prompt", visible=False)
208
- lora_md[i] = gr.Markdown(value="", visible=False)
209
- lora_num[i] = gr.Number(i, visible=False)
210
- with gr.Accordion("From URL", open=True, visible=True):
211
- with gr.Row():
212
- lora_search_civitai_query = gr.Textbox(label="Query", placeholder="flux", lines=1)
213
- lora_search_civitai_basemodel = gr.CheckboxGroup(label="Search LoRA for", choices=["Flux.1 D", "Flux.1 S"], value=["Flux.1 D", "Flux.1 S"])
214
- lora_search_civitai_submit = gr.Button("Search on Civitai")
215
- lora_search_civitai_result = gr.Dropdown(label="Search Results", choices=[("", "")], value="", allow_custom_value=True, visible=False)
216
- lora_search_civitai_json = gr.JSON(value={}, visible=False)
217
- lora_search_civitai_desc = gr.Markdown(value="", visible=False)
218
- lora_download_url = gr.Textbox(label="URL", placeholder="http://...my_lora_url.safetensors", lines=1)
219
- with gr.Row():
220
- lora_download = [None] * num_loras
221
- for i in range(num_loras):
222
- lora_download[i] = gr.Button(f"Get and set LoRA to {int(i+1)}")
223
 
224
  gallery.select(
225
  update_selection,
226
  inputs=[width, height],
227
  outputs=[prompt, selected_info, selected_index, width, height]
228
  )
229
-
 
 
 
 
 
 
 
 
230
  gr.on(
231
  triggers=[generate_button.click, prompt.submit],
232
  fn=change_base_model,
 
5
  from PIL import Image
6
  import spaces
7
  from diffusers import DiffusionPipeline
8
+ from huggingface_hub import hf_hub_download, HfFileSystem, ModelCard, snapshot_download
9
  import copy
10
  import random
11
  import time
12
+
13
  from mod import (models, clear_cache, get_repo_safetensors, change_base_model,
14
  description_ui, num_loras, compose_lora_json, is_valid_lora, fuse_loras, get_trigger_word, pipe)
15
  from flux import (search_civitai_lora, select_civitai_lora, search_civitai_lora_json,
 
42
  else:
43
  print(f"Elapsed time: {self.elapsed_time:.6f} seconds")
44
 
 
45
  def update_selection(evt: gr.SelectData, width, height):
46
  selected_lora = loras[evt.index]
47
  new_placeholder = f"Type a prompt for {selected_lora['title']}"
 
63
  )
64
 
65
  @spaces.GPU(duration=70)
66
+ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, progress):
67
  pipe.to("cuda")
68
  generator = torch.Generator(device="cuda").manual_seed(seed)
69
 
 
71
  with calculateDuration("Generating image"):
72
  # Generate image
73
  image = pipe(
74
+ prompt=prompt_mash,
75
  num_inference_steps=steps,
76
  guidance_scale=cfg_scale,
77
  width=width,
 
88
  # raise gr.Error("You must select a LoRA before proceeding.")
89
  progress(0, desc="Preparing Inference.")
90
 
91
+ prompt_mash = prompt
92
  if is_valid_lora(lora_json):
93
  with calculateDuration("Loading LoRA weights"):
94
  fuse_loras(pipe, lora_json)
95
  trigger_word = get_trigger_word(lora_json)
96
+ prompt_mash = f"{prompt} {trigger_word}"
97
  if selected_index is not None:
98
  selected_lora = loras[selected_index]
99
  lora_path = selected_lora["repo"]
100
  trigger_word = selected_lora["trigger_word"]
101
+ if(trigger_word):
102
+ if "trigger_position" in selected_lora:
103
+ if selected_lora["trigger_position"] == "prepend":
104
+ prompt_mash = f"{trigger_word} {prompt}"
105
+ else:
106
+ prompt_mash = f"{prompt} {trigger_word}"
107
+ else:
108
+ prompt_mash = f"{trigger_word} {prompt}"
109
+ else:
110
+ prompt_mash = prompt
111
  # Load LoRA weights
112
  with calculateDuration(f"Loading LoRA weights for {selected_lora['title']}"):
113
  if "weights" in selected_lora:
 
122
 
123
  progress(1, desc="Preparing Inference.")
124
 
125
+ image = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, progress)
126
  if is_valid_lora(lora_json):
127
  pipe.unfuse_lora()
128
  pipe.unload_lora_weights()
 
131
  clear_cache()
132
  return image, seed
133
 
134
+ def get_huggingface_safetensors(link):
135
+ split_link = link.split("/")
136
+ if(len(split_link) == 2):
137
+ model_card = ModelCard.load(link)
138
+ base_model = model_card.data.get("base_model")
139
+ print(base_model)
140
+ if((base_model != "black-forest-labs/FLUX.1-dev") and (base_model != "black-forest-labs/FLUX.1-schnell")):
141
+ raise Exception("Not a FLUX LoRA!")
142
+ image_path = model_card.data.get("widget", [{}])[0].get("output", {}).get("url", None)
143
+ trigger_word = model_card.data.get("instance_prompt", "")
144
+ image_url = f"https://huggingface.co/{link}/resolve/main/{image_path}" if image_path else None
145
+ fs = HfFileSystem()
146
+ try:
147
+ list_of_files = fs.ls(link, detail=False)
148
+ for file in list_of_files:
149
+ if(file.endswith(".safetensors")):
150
+ safetensors_name = file.split("/")[-1]
151
+ if (not image_url and file.lower().endswith((".jpg", ".jpeg", ".png", ".webp"))):
152
+ image_elements = file.split("/")
153
+ image_url = f"https://huggingface.co/{link}/resolve/main/{image_elements[-1]}"
154
+ except Exception as e:
155
+ print(e)
156
+ gr.Warning(f"You didn't include a link neither a valid Hugging Face repository with a *.safetensors LoRA")
157
+ raise Exception(f"You didn't include a link neither a valid Hugging Face repository with a *.safetensors LoRA")
158
+ return split_link[1], link, safetensors_name, trigger_word, image_url
159
+
160
+ def check_custom_model(link):
161
+ if(link.startswith("https://")):
162
+ if(link.startswith("https://huggingface.co") or link.startswith("https://www.huggingface.co")):
163
+ link_split = link.split("huggingface.co/")
164
+ return get_huggingface_safetensors(link_split[1])
165
+ else:
166
+ return get_huggingface_safetensors(link)
167
+
168
+ def add_custom_lora(custom_lora):
169
+ global loras
170
+ if(custom_lora):
171
+ try:
172
+ title, repo, path, trigger_word, image = check_custom_model(custom_lora)
173
+ print(f"Loaded custom LoRA: {repo}")
174
+ card = f'''
175
+ <div class="custom_lora_card">
176
+ <span>Loaded custom LoRA:</span>
177
+ <div class="card_internal">
178
+ <img src="{image}" />
179
+ <div>
180
+ <h3>{title}</h3>
181
+ <small>{"Using: <code><b>"+trigger_word+"</code></b> as the trigger word" if trigger_word else "No trigger word found. If there's a trigger word, include it in your prompt"}<br></small>
182
+ </div>
183
+ </div>
184
+ </div>
185
+ '''
186
+ existing_item_index = next((index for (index, item) in enumerate(loras) if item['repo'] == repo), None)
187
+ if(not existing_item_index):
188
+ new_item = {
189
+ "image": image,
190
+ "title": title,
191
+ "repo": repo,
192
+ "weights": path,
193
+ "trigger_word": trigger_word
194
+ }
195
+ print(new_item)
196
+ existing_item_index = len(loras)
197
+ loras.append(new_item)
198
+
199
+ return gr.update(visible=True, value=card), gr.update(visible=True), gr.Gallery(selected_index=None), f"Custom: {path}", existing_item_index, trigger_word
200
+ except Exception as e:
201
+ gr.Warning(f"Invalid LoRA: either you entered an invalid link, or a non-FLUX LoRA")
202
+ return gr.update(visible=True, value=f"Invalid LoRA: either you entered an invalid link, a non-FLUX LoRA"), gr.update(visible=True), gr.update(), "", None, ""
203
+ else:
204
+ return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None, ""
205
+
206
+ def remove_custom_lora():
207
+ return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None, ""
208
+
209
  run_lora.zerogpu = True
210
 
211
  css = '''
 
214
  #title h1{font-size: 3em; display:inline-flex; align-items:center}
215
  #title img{width: 100px; margin-right: 0.5em}
216
  #gallery .grid-wrap{height: 10vh}
217
+ #lora_list{background: var(--block-background-fill);padding: 0 1em .3em; font-size: 90%}
218
+ .card_internal{display: flex;height: 100px;margin-top: .5em}
219
+ .card_internal img{margin-right: 1em}
220
+ .styler{--form-gap-width: 0px !important}
221
  '''
222
+ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=css) as app:
223
  with gr.Tab("FLUX LoRA the Explorer"):
224
  title = gr.HTML(
225
  """<h1><img src="https://huggingface.co/spaces/multimodalart/flux-lora-the-explorer/resolve/main/flux_lora.png" alt="LoRA">FLUX LoRA the Explorer Mod</h1>""",
 
253
  columns=3,
254
  elem_id="gallery"
255
  )
256
+ with gr.Group():
257
+ custom_lora = gr.Textbox(label="Custom LoRA", info="LoRA Hugging Face path", placeholder="multimodalart/vintage-ads-flux")
258
+ gr.Markdown("[Check the list of FLUX LoRas](https://huggingface.co/models?other=base_model:adapter:black-forest-labs/FLUX.1-dev)", elem_id="lora_list")
259
+ custom_lora_info = gr.HTML(visible=False)
260
+ custom_lora_button = gr.Button("Remove custom LoRA", visible=False)
261
  with gr.Column(scale=4):
262
  result = gr.Image(label="Generated Image")
263
 
 
280
  seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True)
281
  lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=1, step=0.01, value=0.95)
282
 
283
+ with gr.Accordion("External LoRA", open=True):
284
+ with gr.Column():
285
+ lora_repo_json = gr.JSON(value=[{}] * num_loras, visible=False)
286
+ lora_repo = [None] * num_loras
287
+ lora_weights = [None] * num_loras
288
+ lora_trigger = [None] * num_loras
289
+ lora_wt = [None] * num_loras
290
+ lora_info = [None] * num_loras
291
+ lora_copy = [None] * num_loras
292
+ lora_md = [None] * num_loras
293
+ lora_num = [None] * num_loras
294
+ for i in range(num_loras):
295
+ with gr.Group():
296
+ with gr.Row():
297
+ lora_repo[i] = gr.Dropdown(label=f"LoRA {int(i+1)} Repo", choices=get_all_lora_tupled_list(), info="Input LoRA Repo ID", value="", allow_custom_value=True)
298
+ lora_weights[i] = gr.Dropdown(label=f"LoRA {int(i+1)} Filename", choices=[], info="Optional", value="", allow_custom_value=True)
299
+ lora_trigger[i] = gr.Textbox(label=f"LoRA {int(i+1)} Trigger Prompt", lines=1, max_lines=4, value="")
300
+ lora_wt[i] = gr.Slider(label=f"LoRA {int(i+1)} Scale", minimum=-2, maximum=2, step=0.01, value=1.00)
301
+ with gr.Row():
302
+ lora_info[i] = gr.Textbox(label="", info="Example of prompt:", value="", show_copy_button=True, interactive=False, visible=False)
303
+ lora_copy[i] = gr.Button(value="Copy example to prompt", visible=False)
304
+ lora_md[i] = gr.Markdown(value="", visible=False)
305
+ lora_num[i] = gr.Number(i, visible=False)
306
+ with gr.Accordion("From URL", open=True, visible=True):
307
  with gr.Row():
308
+ lora_search_civitai_query = gr.Textbox(label="Query", placeholder="flux", lines=1)
309
+ lora_search_civitai_basemodel = gr.CheckboxGroup(label="Search LoRA for", choices=["Flux.1 D", "Flux.1 S"], value=["Flux.1 D", "Flux.1 S"])
310
+ lora_search_civitai_submit = gr.Button("Search on Civitai")
311
+ lora_search_civitai_result = gr.Dropdown(label="Search Results", choices=[("", "")], value="", allow_custom_value=True, visible=False)
312
+ lora_search_civitai_json = gr.JSON(value={}, visible=False)
313
+ lora_search_civitai_desc = gr.Markdown(value="", visible=False)
314
+ lora_download_url = gr.Textbox(label="URL", placeholder="http://...my_lora_url.safetensors", lines=1)
315
  with gr.Row():
316
+ lora_download = [None] * num_loras
317
+ for i in range(num_loras):
318
+ lora_download[i] = gr.Button(f"Get and set LoRA to {int(i+1)}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
319
 
320
  gallery.select(
321
  update_selection,
322
  inputs=[width, height],
323
  outputs=[prompt, selected_info, selected_index, width, height]
324
  )
325
+ custom_lora.input(
326
+ add_custom_lora,
327
+ inputs=[custom_lora],
328
+ outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, prompt]
329
+ )
330
+ custom_lora_button.click(
331
+ remove_custom_lora,
332
+ outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, custom_lora]
333
+ )
334
  gr.on(
335
  triggers=[generate_button.click, prompt.submit],
336
  fn=change_base_model,
loras.json CHANGED
@@ -1,116 +1,122 @@
1
- [
2
- {
3
- "image": "https://huggingface.co/multimodalart/flux-tarot-v1/resolve/main/images/e5f2761e5d474e6ba492d20dca0fa26f_e78f1524074b42b6ac49643ffad50ac6.png",
4
- "title": "Tarot v1",
5
- "repo": "multimodalart/flux-tarot-v1",
6
- "trigger_word": "in the style of TOK a trtcrd, tarot style",
7
- "aspect": "portrait"
8
- },
9
- {
10
- "image": "https://huggingface.co/alvdansen/frosting_lane_flux/resolve/main/images/content%20-%202024-08-11T005936.346.jpeg",
11
- "title": "Frosting Lane Flux",
12
- "repo": "alvdansen/frosting_lane_flux",
13
- "trigger_word": ""
14
- },
15
- {
16
- "image": "https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/picture-6-rev1.png?raw=true",
17
- "title": "flux-Realism",
18
- "repo": "XLabs-AI/flux-RealismLora",
19
- "trigger_word": ""
20
- },
21
- {
22
- "image": "https://huggingface.co/nerijs/animation2k-flux/resolve/main/images/Q8-oVxNnXvZ9HNrgbNpGw_02762aaaba3b47859ee5fe9403a371e3.png",
23
- "title": "animation2k",
24
- "repo": "nerijs/animation2k-flux",
25
- "trigger_word": ""
26
- },
27
- {
28
- "image":"https://huggingface.co/alvdansen/softserve_anime/resolve/main/images/ComfyUI_00062_.png",
29
- "title":"SoftServe Anime",
30
- "repo": "alvdansen/softserve_anime",
31
- "trigger_word": ""
32
- },
33
- {
34
- "image": "https://huggingface.co/veryVANYA/ps1-style-flux/resolve/main/24439220.jpeg",
35
- "title": "PS1 style",
36
- "repo": "veryVANYA/ps1-style-flux",
37
- "trigger_word": "ps1 game screenshot"
38
- },
39
- {
40
- "image": "https://huggingface.co/alvdansen/flux-koda/resolve/main/images/ComfyUI_00566_%20(2).png",
41
- "title": "flux koda",
42
- "repo": "alvdansen/flux-koda",
43
- "trigger_word": "flmft style"
44
- },
45
- {
46
- "image": "https://pbs.twimg.com/media/GU7NsZPa8AA4Ddl?format=jpg&name=4096x4096",
47
- "title": "Half Illustration",
48
- "repo": "davisbro/half_illustration",
49
- "trigger_word": "in the style of TOK"
50
- },
51
- {
52
- "image":"https://pbs.twimg.com/media/GVRiSH7WgAAnI4P?format=jpg&name=medium",
53
- "title":"wrong",
54
- "repo": "fofr/flux-wrong",
55
- "trigger_word": "WRNG"
56
- },
57
- {
58
- "image":"https://huggingface.co/linoyts/yarn_art_Flux_LoRA/resolve/main/yarn_art_2.png",
59
- "title":"Yarn Art",
60
- "repo": "linoyts/yarn_art_Flux_LoRA",
61
- "trigger_word": ", yarn art style"
62
- },
63
- {
64
- "image": "https://huggingface.co/SebastianBodza/flux_lora_aquarel_watercolor/resolve/main/images/ascend.webp",
65
- "title": "Aquarell Watercolor",
66
- "repo": "SebastianBodza/Flux_Aquarell_Watercolor_v2",
67
- "trigger_word": "in a watercolor style, AQUACOLTOK. White background."
68
- },
69
- {
70
- "image": "https://huggingface.co/dataautogpt3/FLUX-SyntheticAnime/resolve/main/assets/angel.png",
71
- "title": "SyntheticAnime",
72
- "repo": "dataautogpt3/FLUX-SyntheticAnime",
73
- "trigger_word": "1980s anime screengrab, VHS quality"
74
- },
75
- {
76
- "image": "https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/result_14.png?raw=true",
77
- "title": "flux-anime",
78
- "repo": "XLabs-AI/flux-lora-collection",
79
- "weights": "anime_lora.safetensors",
80
- "trigger_word": ", anime"
81
- },
82
- {
83
- "image": "https://huggingface.co/kudzueye/Boreal/resolve/main/images/ComfyUI_00845_.png",
84
- "title": "Boreal",
85
- "repo": "kudzueye/Boreal",
86
- "weights": "boreal-flux-dev-lora-v04_1000_steps.safetensors",
87
- "trigger_word": "phone photo"
88
- },
89
- {
90
- "image": "https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/result_18.png?raw=true",
91
- "title": "flux-disney",
92
- "repo": "XLabs-AI/flux-lora-collection",
93
- "weights": "disney_lora.safetensors",
94
- "trigger_word": ", disney style"
95
- },
96
- {
97
- "image": "https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/result_23.png?raw=true",
98
- "title": "flux-art",
99
- "repo": "XLabs-AI/flux-lora-collection",
100
- "weights": "art_lora.safetensors",
101
- "trigger_word": ", art"
102
- },
103
- {
104
- "image": "https://huggingface.co/martintomov/retrofuturism-flux/resolve/main/images/2e40deba-858e-454f-ae1c-d1ba2adb6a65.jpeg",
105
- "title": "Retrofuturism Flux",
106
- "repo": "martintomov/retrofuturism-flux",
107
- "trigger_word": ", retrofuturism"
108
- },
109
- {
110
- "image": "https://huggingface.co/alfredplpl/flux.1-dev-modern-anime-lora/resolve/main/eyecatch2.jpg",
111
- "title": "flux.1-dev-modern-anime-lora",
112
- "repo": "alfredplpl/flux.1-dev-modern-anime-lora",
113
- "weights": "modern-anime-lora_diffusers.safetensors",
114
- "trigger_word": "modern anime"
115
- }
 
 
 
 
 
 
116
  ]
 
1
+ [
2
+ {
3
+ "image": "https://huggingface.co/multimodalart/flux-tarot-v1/resolve/main/images/e5f2761e5d474e6ba492d20dca0fa26f_e78f1524074b42b6ac49643ffad50ac6.png",
4
+ "title": "Tarot v1",
5
+ "repo": "multimodalart/flux-tarot-v1",
6
+ "trigger_word": "in the style of TOK a trtcrd, tarot style",
7
+ "aspect": "portrait"
8
+ },
9
+ {
10
+ "image": "https://huggingface.co/alvdansen/frosting_lane_flux/resolve/main/images/content%20-%202024-08-11T005936.346.jpeg",
11
+ "title": "Frosting Lane Flux",
12
+ "repo": "alvdansen/frosting_lane_flux",
13
+ "trigger_word": ""
14
+ },
15
+ {
16
+ "image": "https://huggingface.co/AIWarper/RubberCore1920sCartoonStyle/resolve/main/images/Rub_00006_.png",
17
+ "title": "1920s cartoon",
18
+ "repo": "AIWarper/RubberCore1920sCartoonStyle",
19
+ "trigger_word": "RU883R style",
20
+ "trigger_position": "prepend"
21
+ },
22
+ {
23
+ "image": "https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/picture-6-rev1.png?raw=true",
24
+ "title": "flux-Realism",
25
+ "repo": "XLabs-AI/flux-RealismLora",
26
+ "trigger_word": ""
27
+ },
28
+ {
29
+ "image": "https://huggingface.co/nerijs/animation2k-flux/resolve/main/images/Q8-oVxNnXvZ9HNrgbNpGw_02762aaaba3b47859ee5fe9403a371e3.png",
30
+ "title": "animation2k",
31
+ "repo": "nerijs/animation2k-flux",
32
+ "trigger_word": ""
33
+ },
34
+ {
35
+ "image":"https://huggingface.co/alvdansen/softserve_anime/resolve/main/images/ComfyUI_00062_.png",
36
+ "title":"SoftServe Anime",
37
+ "repo": "alvdansen/softserve_anime",
38
+ "trigger_word": ""
39
+ },
40
+ {
41
+ "image": "https://huggingface.co/veryVANYA/ps1-style-flux/resolve/main/24439220.jpeg",
42
+ "title": "PS1 style",
43
+ "repo": "veryVANYA/ps1-style-flux",
44
+ "trigger_word": "ps1 game screenshot"
45
+ },
46
+ {
47
+ "image": "https://huggingface.co/alvdansen/flux-koda/resolve/main/images/ComfyUI_00566_%20(2).png",
48
+ "title": "flux koda",
49
+ "repo": "alvdansen/flux-koda",
50
+ "trigger_word": "flmft style"
51
+ },
52
+ {
53
+ "image": "https://pbs.twimg.com/media/GU7NsZPa8AA4Ddl?format=jpg&name=4096x4096",
54
+ "title": "Half Illustration",
55
+ "repo": "davisbro/half_illustration",
56
+ "trigger_word": "in the style of TOK"
57
+ },
58
+ {
59
+ "image":"https://pbs.twimg.com/media/GVRiSH7WgAAnI4P?format=jpg&name=medium",
60
+ "title":"wrong",
61
+ "repo": "fofr/flux-wrong",
62
+ "trigger_word": "WRNG"
63
+ },
64
+ {
65
+ "image":"https://huggingface.co/linoyts/yarn_art_Flux_LoRA/resolve/main/yarn_art_2.png",
66
+ "title":"Yarn Art",
67
+ "repo": "linoyts/yarn_art_Flux_LoRA",
68
+ "trigger_word": ", yarn art style"
69
+ },
70
+ {
71
+ "image": "https://huggingface.co/Norod78/Flux_1_Dev_LoRA_Paper-Cutout-Style/resolve/main/08a19840b6214b76b0607b2f9d5a7e28_63159b9d98124c008efb1d36446a615c.png",
72
+ "title": "Paper Cutout",
73
+ "repo": "Norod78/Flux_1_Dev_LoRA_Paper-Cutout-Style",
74
+ "trigger_word": ", Paper Cutout Style"
75
+ },
76
+ {
77
+ "image": "https://huggingface.co/SebastianBodza/flux_lora_aquarel_watercolor/resolve/main/images/ascend.webp",
78
+ "title": "Aquarell Watercolor",
79
+ "repo": "SebastianBodza/Flux_Aquarell_Watercolor_v2",
80
+ "trigger_word": "in a watercolor style, AQUACOLTOK. White background."
81
+ },
82
+ {
83
+ "image": "https://huggingface.co/dataautogpt3/FLUX-SyntheticAnime/resolve/main/assets/angel.png",
84
+ "title": "SyntheticAnime",
85
+ "repo": "dataautogpt3/FLUX-SyntheticAnime",
86
+ "trigger_word": "1980s anime screengrab, VHS quality"
87
+ },
88
+ {
89
+ "image": "https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/result_14.png?raw=true",
90
+ "title": "flux-anime",
91
+ "repo": "XLabs-AI/flux-lora-collection",
92
+ "weights": "anime_lora.safetensors",
93
+ "trigger_word": ", anime"
94
+ },
95
+ {
96
+ "image": "https://huggingface.co/kudzueye/Boreal/resolve/main/images/ComfyUI_00845_.png",
97
+ "title": "Boreal",
98
+ "repo": "kudzueye/Boreal",
99
+ "weights": "boreal-flux-dev-lora-v04_1000_steps.safetensors",
100
+ "trigger_word": "phone photo"
101
+ },
102
+ {
103
+ "image": "https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/result_18.png?raw=true",
104
+ "title": "flux-disney",
105
+ "repo": "XLabs-AI/flux-lora-collection",
106
+ "weights": "disney_lora.safetensors",
107
+ "trigger_word": ", disney style"
108
+ },
109
+ {
110
+ "image": "https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/result_23.png?raw=true",
111
+ "title": "flux-art",
112
+ "repo": "XLabs-AI/flux-lora-collection",
113
+ "weights": "art_lora.safetensors",
114
+ "trigger_word": ", art"
115
+ },
116
+ {
117
+ "image": "https://huggingface.co/martintomov/retrofuturism-flux/resolve/main/images/2e40deba-858e-454f-ae1c-d1ba2adb6a65.jpeg",
118
+ "title": "Retrofuturism Flux",
119
+ "repo": "martintomov/retrofuturism-flux",
120
+ "trigger_word": ", retrofuturism"
121
+ }
122
  ]
mod.py CHANGED
@@ -18,7 +18,7 @@ models = [
18
  "sayakpaul/FLUX.1-merged",
19
  "John6666/blue-pencil-flux1-v001-fp8-flux",
20
  "John6666/copycat-flux-test-fp8-v11-fp8-flux",
21
- "John6666/nepotism-fuxdevschnell-v3aio-flux",
22
  "John6666/niji-style-flux-devfp8-fp8-flux",
23
  "John6666/fluxunchained-artfulnsfw-fut516xfp8e4m3fnv11-fp8-flux",
24
  "John6666/fastflux-unchained-t5f16-fp8-flux",
 
18
  "sayakpaul/FLUX.1-merged",
19
  "John6666/blue-pencil-flux1-v001-fp8-flux",
20
  "John6666/copycat-flux-test-fp8-v11-fp8-flux",
21
+ "John6666/nepotism-fuxdevschnell-v3aio-fp8-flux",
22
  "John6666/niji-style-flux-devfp8-fp8-flux",
23
  "John6666/fluxunchained-artfulnsfw-fut516xfp8e4m3fnv11-fp8-flux",
24
  "John6666/fastflux-unchained-t5f16-fp8-flux",