John6666 committed on
Commit 0353ee6
1 Parent(s): ca41bcc

Upload 3 files

Files changed (3)
  1. app.py +34 -16
  2. live_preview_helpers.py +166 -0
  3. loras.json +182 -149
app.py CHANGED
@@ -4,7 +4,8 @@ import json
 import logging
 import torch
 from PIL import Image
-from diffusers import DiffusionPipeline
+from diffusers import DiffusionPipeline, AutoencoderTiny, AutoencoderKL
+from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images
 from diffusers import FluxControlNetPipeline, FluxControlNetModel, FluxMultiControlNetModel
 from huggingface_hub import hf_hub_download, HfFileSystem, ModelCard, snapshot_download
 import copy
@@ -21,16 +22,29 @@ from flux import (search_civitai_lora, select_civitai_lora, search_civitai_lora_
 from tagger.tagger import predict_tags_wd, compose_prompt_to_copy
 from tagger.fl2flux import predict_tags_fl2_flux
 
+# Load LoRAs from JSON file
+with open('loras.json', 'r') as f:
+    loras = json.load(f)
+
+dtype = torch.bfloat16
+#dtype = torch.float8_e4m3fn
+device = "cuda" if torch.cuda.is_available() else "cpu"
 # Initialize the base model
 base_model = models[0]
 controlnet_model_union_repo = 'InstantX/FLUX.1-dev-Controlnet-Union'
 #controlnet_model_union_repo = 'InstantX/FLUX.1-dev-Controlnet-Union-alpha'
+taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
+good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype).to(device)
 pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=torch.bfloat16)
 controlnet_union = None
 controlnet = None
 last_model = models[0]
 last_cn_on = False
 
+MAX_SEED = 2**32-1
+
+pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
+
 # https://huggingface.co/InstantX/FLUX.1-dev-Controlnet-Union
 # https://huggingface.co/spaces/jiuface/FLUX.1-dev-Controlnet-Union
 def change_base_model(repo_id: str, cn_on: bool):
@@ -39,8 +53,6 @@ def change_base_model(repo_id: str, cn_on: bool):
     global controlnet
     global last_model
     global last_cn_on
-    dtype = torch.bfloat16
-    #dtype = torch.float8_e4m3fn
     try:
         if (repo_id == last_model and cn_on is last_cn_on) or not is_repo_name(repo_id) or not is_repo_exists(repo_id): return gr.update(visible=True)
         if cn_on:
@@ -50,6 +62,7 @@
             controlnet_union = FluxControlNetModel.from_pretrained(controlnet_model_union_repo, torch_dtype=dtype)
             controlnet = FluxMultiControlNetModel([controlnet_union])
             pipe = FluxControlNetPipeline.from_pretrained(repo_id, controlnet=controlnet, torch_dtype=dtype)
+            pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
             last_model = repo_id
             last_cn_on = cn_on
             #progress(1, desc=f"Model loaded: {repo_id} / ControlNet Loaded: {controlnet_model_union_repo}")
@@ -59,6 +72,7 @@
             print(f"Loading model: {repo_id}")
             clear_cache()
             pipe = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=dtype)
+            pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
             last_model = repo_id
             last_cn_on = cn_on
             #progress(1, desc=f"Model loaded: {repo_id}")
@@ -70,12 +84,6 @@
 
 change_base_model.zerogpu = True
 
-# Load LoRAs from JSON file
-with open('loras.json', 'r') as f:
-    loras = json.load(f)
-
-MAX_SEED = 2**32-1
-
 class calculateDuration:
     def __init__(self, activity_name=""):
         self.activity_name = activity_name
@@ -126,7 +134,7 @@ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scal
         modes, images, scales = get_control_params()
         if not cn_on or len(modes) == 0:
             progress(0, desc="Start Inference.")
-            image = pipe(
+            for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
                 prompt=prompt_mash,
                 num_inference_steps=steps,
                 guidance_scale=cfg_scale,
@@ -134,12 +142,15 @@
                 height=height,
                 generator=generator,
                 joint_attention_kwargs={"scale": lora_scale},
-            ).images[0]
+                output_type="pil",
+                good_vae=good_vae,
+            ):
+                yield img
         else:
             progress(0, desc="Start Inference with ControlNet.")
             if controlnet is not None: controlnet.to("cuda")
             if controlnet_union is not None: controlnet_union.to("cuda")
-            image = pipe(
+            for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
                 prompt=prompt_mash,
                 control_image=images,
                 control_mode=modes,
@@ -150,7 +161,10 @@
                 controlnet_conditioning_scale=scales,
                 generator=generator,
                 joint_attention_kwargs={"scale": lora_scale},
-            ).images[0]
+                output_type="pil",
+                good_vae=good_vae,
+            ):
+                yield img
     except Exception as e:
         print(e)
         raise gr.Error(f"Inference Error: {e}")
@@ -197,8 +211,12 @@ def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, wid
         seed = random.randint(0, MAX_SEED)
 
     progress(0, desc="Running Inference.")
-
-    image = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, cn_on, progress)
+    image_generator = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, cn_on, progress)
+    # Consume the generator to get the final image
+    final_image = None
+    for image in image_generator:
+        final_image = image
+        yield image, seed # Yield intermediate images and seed
     if is_valid_lora(lora_json):
         pipe.unfuse_lora()
         pipe.unload_lora_weights()
@@ -207,7 +225,7 @@
     if controlnet is not None: controlnet.to("cpu")
     if controlnet_union is not None: controlnet_union.to("cpu")
     clear_cache()
-    return image, seed
+    return final_image, seed # Return the final image and seed
 
 def get_huggingface_safetensors(link):
     split_link = link.split("/")
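
Note on the repeated `flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)` lines above: Python functions are descriptors, so `func.__get__(obj)` returns a method bound to `obj`, letting the streaming call use `self` like a native pipeline method. It must be re-attached after every `from_pretrained`, since each call builds a fresh pipeline object. A minimal self-contained sketch of the pattern (`Pipe` and `stream_call` are hypothetical stand-ins, not names from this commit):

# Binding a free function as an instance method via the descriptor protocol.
class Pipe:
    def __init__(self, name: str):
        self.name = name

def stream_call(self, prompt: str) -> str:
    # `self` is whatever instance the function was bound to with __get__
    return f"{self.name}: {prompt}"

pipe = Pipe("flux")
pipe.stream_call = stream_call.__get__(pipe)  # bound method on this one instance
print(pipe.stream_call("a cat"))  # -> flux: a cat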
live_preview_helpers.py ADDED
@@ -0,0 +1,166 @@
+import torch
+import numpy as np
+from diffusers import FluxPipeline, AutoencoderTiny, FlowMatchEulerDiscreteScheduler
+from typing import Any, Dict, List, Optional, Union
+
+# Helper functions
+def calculate_shift(
+    image_seq_len,
+    base_seq_len: int = 256,
+    max_seq_len: int = 4096,
+    base_shift: float = 0.5,
+    max_shift: float = 1.16,
+):
+    m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
+    b = base_shift - m * base_seq_len
+    mu = image_seq_len * m + b
+    return mu
+
+def retrieve_timesteps(
+    scheduler,
+    num_inference_steps: Optional[int] = None,
+    device: Optional[Union[str, torch.device]] = None,
+    timesteps: Optional[List[int]] = None,
+    sigmas: Optional[List[float]] = None,
+    **kwargs,
+):
+    if timesteps is not None and sigmas is not None:
+        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
+    if timesteps is not None:
+        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
+        timesteps = scheduler.timesteps
+        num_inference_steps = len(timesteps)
+    elif sigmas is not None:
+        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
+        timesteps = scheduler.timesteps
+        num_inference_steps = len(timesteps)
+    else:
+        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
+        timesteps = scheduler.timesteps
+    return timesteps, num_inference_steps
+
+# FLUX pipeline function
+@torch.inference_mode()
+def flux_pipe_call_that_returns_an_iterable_of_images(
+    self,
+    prompt: Union[str, List[str]] = None,
+    prompt_2: Optional[Union[str, List[str]]] = None,
+    height: Optional[int] = None,
+    width: Optional[int] = None,
+    num_inference_steps: int = 28,
+    timesteps: List[int] = None,
+    guidance_scale: float = 3.5,
+    num_images_per_prompt: Optional[int] = 1,
+    generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+    latents: Optional[torch.FloatTensor] = None,
+    prompt_embeds: Optional[torch.FloatTensor] = None,
+    pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
+    output_type: Optional[str] = "pil",
+    return_dict: bool = True,
+    joint_attention_kwargs: Optional[Dict[str, Any]] = None,
+    max_sequence_length: int = 512,
+    good_vae: Optional[Any] = None,
+):
+    height = height or self.default_sample_size * self.vae_scale_factor
+    width = width or self.default_sample_size * self.vae_scale_factor
+
+    # 1. Check inputs
+    self.check_inputs(
+        prompt,
+        prompt_2,
+        height,
+        width,
+        prompt_embeds=prompt_embeds,
+        pooled_prompt_embeds=pooled_prompt_embeds,
+        max_sequence_length=max_sequence_length,
+    )
+
+    self._guidance_scale = guidance_scale
+    self._joint_attention_kwargs = joint_attention_kwargs
+    self._interrupt = False
+
+    # 2. Define call parameters
+    batch_size = 1 if isinstance(prompt, str) else len(prompt)
+    device = self._execution_device
+
+    # 3. Encode prompt
+    lora_scale = joint_attention_kwargs.get("scale", None) if joint_attention_kwargs is not None else None
+    prompt_embeds, pooled_prompt_embeds, text_ids = self.encode_prompt(
+        prompt=prompt,
+        prompt_2=prompt_2,
+        prompt_embeds=prompt_embeds,
+        pooled_prompt_embeds=pooled_prompt_embeds,
+        device=device,
+        num_images_per_prompt=num_images_per_prompt,
+        max_sequence_length=max_sequence_length,
+        lora_scale=lora_scale,
+    )
+    # 4. Prepare latent variables
+    num_channels_latents = self.transformer.config.in_channels // 4
+    latents, latent_image_ids = self.prepare_latents(
+        batch_size * num_images_per_prompt,
+        num_channels_latents,
+        height,
+        width,
+        prompt_embeds.dtype,
+        device,
+        generator,
+        latents,
+    )
+    # 5. Prepare timesteps
+    sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps)
+    image_seq_len = latents.shape[1]
+    mu = calculate_shift(
+        image_seq_len,
+        self.scheduler.config.base_image_seq_len,
+        self.scheduler.config.max_image_seq_len,
+        self.scheduler.config.base_shift,
+        self.scheduler.config.max_shift,
+    )
+    timesteps, num_inference_steps = retrieve_timesteps(
+        self.scheduler,
+        num_inference_steps,
+        device,
+        timesteps,
+        sigmas,
+        mu=mu,
+    )
+    self._num_timesteps = len(timesteps)
+
+    # Handle guidance
+    guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32).expand(latents.shape[0]) if self.transformer.config.guidance_embeds else None
+
+    # 6. Denoising loop
+    for i, t in enumerate(timesteps):
+        if self.interrupt:
+            continue
+
+        timestep = t.expand(latents.shape[0]).to(latents.dtype)
+
+        noise_pred = self.transformer(
+            hidden_states=latents,
+            timestep=timestep / 1000,
+            guidance=guidance,
+            pooled_projections=pooled_prompt_embeds,
+            encoder_hidden_states=prompt_embeds,
+            txt_ids=text_ids,
+            img_ids=latent_image_ids,
+            joint_attention_kwargs=self.joint_attention_kwargs,
+            return_dict=False,
+        )[0]
+        latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
+
+        # Yield intermediate result
+        latents_for_image = self._unpack_latents(latents, height, width, self.vae_scale_factor)
+        latents_for_image = (latents_for_image / self.vae.config.scaling_factor) + self.vae.config.shift_factor
+        image = self.vae.decode(latents_for_image, return_dict=False)[0]
+        yield self.image_processor.postprocess(image, output_type=output_type)[0]
+        torch.cuda.empty_cache()
+
+    # Final image using good_vae
+    latents = self._unpack_latents(latents, height, width, self.vae_scale_factor)
+    latents = (latents / good_vae.config.scaling_factor) + good_vae.config.shift_factor
+    image = good_vae.decode(latents, return_dict=False)[0]
+    self.maybe_free_model_hooks()
+    torch.cuda.empty_cache()
+    yield self.image_processor.postprocess(image, output_type=output_type)[0]
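
Two things worth noting in this helper. `calculate_shift` is a straight linear interpolation: with the defaults, m = (1.16 - 0.5) / (4096 - 256) ≈ 0.000172, so a 1024x1024 FLUX image (which packs to 4096 latent tokens) gets mu ≈ 1.16, while a 256-token latent gets the base 0.5. And the denoising loop decodes every intermediate latent with the pipeline's own `self.vae` and yields it as a preview, then decodes the final latent once with the separately passed `good_vae`, so only the last yield is full quality. A minimal consumer sketch, assuming `pipe` and `good_vae` are set up as in the app.py diff above:

import torch

# Stream previews from the patched pipeline; the last yielded image is the
# full-quality good_vae decode.
generator = torch.Generator(device="cuda").manual_seed(42)
for i, preview in enumerate(pipe.flux_pipe_call_that_returns_an_iterable_of_images(
    prompt="a watercolor fox",
    num_inference_steps=8,
    guidance_scale=3.5,
    width=1024,
    height=1024,
    generator=generator,
    output_type="pil",
    good_vae=good_vae,
)):
    preview.save(f"step_{i:02d}.png")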
loras.json CHANGED
@@ -1,150 +1,183 @@
-[
-    {
-        "image": "https://cdn-uploads.huggingface.co/production/uploads/641498f7479c98a0b36f9e3e/2hT_tW_DCcF60lNyCBE_8.png",
-        "title": "Abe Shinzo Flux",
-        "repo": "AbeShinzo0708/AbeShinzo_flux_lora_test",
-        "weights": "AbeShinzo.safetensors",
-        "trigger_word": ", Shinzo Abe"
-    },
-    {
-        "image": "https://huggingface.co/p1atdev/flux.1-schnell-pvc-style-lora/resolve/main/images/flux_lora_00221_.png",
-        "title": "FLUX.1 schnell PVC style",
-        "repo": "p1atdev/flux.1-schnell-pvc-style-lora",
-        "weights": "pvc-shnell-7250+7500.safetensors",
-        "trigger_word": ", pvc figure"
-    },
-    {
-        "image": "https://cdn-uploads.huggingface.co/production/uploads/64b24543eec33e27dc9a6eca/sCvp6zDbBTXBEHh3ZSav6.png",
-        "title": "Flux Pastel Anime",
-        "repo": "Raelina/Flux-Pastel-Anime",
-        "weights": "lora_pastel_anime_flux.safetensors",
-        "trigger_word": ""
-    },
-    {
-        "image": "https://huggingface.co/wavymulder/OverlordStyleFLUX/resolve/main/imgs/ComfyUI_00725_.png",
-        "title": "Overlord Style FLUX",
-        "repo": "wavymulder/OverlordStyleFLUX",
-        "weights": "ovld_style_overlord_wavymulder.safetensors",
-        "trigger_word": ", ovld style anime"
-    },
-    {
-        "image": "https://huggingface.co/multimodalart/flux-tarot-v1/resolve/main/images/e5f2761e5d474e6ba492d20dca0fa26f_e78f1524074b42b6ac49643ffad50ac6.png",
-        "title": "Tarot v1",
-        "repo": "multimodalart/flux-tarot-v1",
-        "trigger_word": "in the style of TOK a trtcrd, tarot style",
-        "aspect": "portrait"
-    },
-    {
-        "image": "https://huggingface.co/alvdansen/frosting_lane_flux/resolve/main/images/content%20-%202024-08-11T005936.346.jpeg",
-        "title": "Frosting Lane Flux",
-        "repo": "alvdansen/frosting_lane_flux",
-        "trigger_word": ""
-    },
-    {
-        "image": "https://huggingface.co/AIWarper/RubberCore1920sCartoonStyle/resolve/main/images/Rub_00006_.png",
-        "title": "1920s cartoon",
-        "repo": "AIWarper/RubberCore1920sCartoonStyle",
-        "trigger_word": "RU883R style",
-        "trigger_position": "prepend"
-    },
-    {
-        "image": "https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/picture-6-rev1.png?raw=true",
-        "title": "flux-Realism",
-        "repo": "XLabs-AI/flux-RealismLora",
-        "trigger_word": ""
-    },
-    {
-        "image": "https://huggingface.co/nerijs/animation2k-flux/resolve/main/images/Q8-oVxNnXvZ9HNrgbNpGw_02762aaaba3b47859ee5fe9403a371e3.png",
-        "title": "animation2k",
-        "repo": "nerijs/animation2k-flux",
-        "trigger_word": ""
-    },
-    {
-        "image":"https://huggingface.co/alvdansen/softserve_anime/resolve/main/images/ComfyUI_00062_.png",
-        "title":"SoftServe Anime",
-        "repo": "alvdansen/softserve_anime",
-        "trigger_word": ""
-    },
-    {
-        "image": "https://huggingface.co/veryVANYA/ps1-style-flux/resolve/main/24439220.jpeg",
-        "title": "PS1 style",
-        "repo": "veryVANYA/ps1-style-flux",
-        "trigger_word": "ps1 game screenshot"
-    },
-    {
-        "image": "https://huggingface.co/alvdansen/flux-koda/resolve/main/images/ComfyUI_00566_%20(2).png",
-        "title": "flux koda",
-        "repo": "alvdansen/flux-koda",
-        "trigger_word": "flmft style"
-    },
-    {
-        "image": "https://pbs.twimg.com/media/GU7NsZPa8AA4Ddl?format=jpg&name=4096x4096",
-        "title": "Half Illustration",
-        "repo": "davisbro/half_illustration",
-        "trigger_word": "in the style of TOK"
-    },
-    {
-        "image":"https://pbs.twimg.com/media/GVRiSH7WgAAnI4P?format=jpg&name=medium",
-        "title":"wrong",
-        "repo": "fofr/flux-wrong",
-        "trigger_word": "WRNG"
-    },
-    {
-        "image":"https://huggingface.co/linoyts/yarn_art_Flux_LoRA/resolve/main/yarn_art_2.png",
-        "title":"Yarn Art",
-        "repo": "linoyts/yarn_art_Flux_LoRA",
-        "trigger_word": ", yarn art style"
-    },
-    {
-        "image": "https://huggingface.co/Norod78/Flux_1_Dev_LoRA_Paper-Cutout-Style/resolve/main/08a19840b6214b76b0607b2f9d5a7e28_63159b9d98124c008efb1d36446a615c.png",
-        "title": "Paper Cutout",
-        "repo": "Norod78/Flux_1_Dev_LoRA_Paper-Cutout-Style",
-        "trigger_word": ", Paper Cutout Style"
-    },
-    {
-        "image": "https://huggingface.co/SebastianBodza/flux_lora_aquarel_watercolor/resolve/main/images/ascend.webp",
-        "title": "Aquarell Watercolor",
-        "repo": "SebastianBodza/Flux_Aquarell_Watercolor_v2",
-        "trigger_word": "in a watercolor style, AQUACOLTOK. White background."
-    },
-    {
-        "image": "https://huggingface.co/dataautogpt3/FLUX-SyntheticAnime/resolve/main/assets/angel.png",
-        "title": "SyntheticAnime",
-        "repo": "dataautogpt3/FLUX-SyntheticAnime",
-        "trigger_word": "1980s anime screengrab, VHS quality"
-    },
-    {
-        "image": "https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/result_14.png?raw=true",
-        "title": "flux-anime",
-        "repo": "XLabs-AI/flux-lora-collection",
-        "weights": "anime_lora.safetensors",
-        "trigger_word": ", anime"
-    },
-    {
-        "image": "https://huggingface.co/kudzueye/Boreal/resolve/main/images/ComfyUI_00845_.png",
-        "title": "Boreal",
-        "repo": "kudzueye/Boreal",
-        "weights": "boreal-flux-dev-lora-v04_1000_steps.safetensors",
-        "trigger_word": "phone photo"
-    },
-    {
-        "image": "https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/result_18.png?raw=true",
-        "title": "flux-disney",
-        "repo": "XLabs-AI/flux-lora-collection",
-        "weights": "disney_lora.safetensors",
-        "trigger_word": ", disney style"
-    },
-    {
-        "image": "https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/result_23.png?raw=true",
-        "title": "flux-art",
-        "repo": "XLabs-AI/flux-lora-collection",
-        "weights": "art_lora.safetensors",
-        "trigger_word": ", art"
-    },
-    {
-        "image": "https://huggingface.co/martintomov/retrofuturism-flux/resolve/main/images/2e40deba-858e-454f-ae1c-d1ba2adb6a65.jpeg",
-        "title": "Retrofuturism Flux",
-        "repo": "martintomov/retrofuturism-flux",
-        "trigger_word": ", retrofuturism"
-    }
+[
+    {
+        "image": "https://cdn-uploads.huggingface.co/production/uploads/641498f7479c98a0b36f9e3e/2hT_tW_DCcF60lNyCBE_8.png",
+        "title": "Abe Shinzo Flux",
+        "repo": "AbeShinzo0708/AbeShinzo_flux_lora_test",
+        "weights": "AbeShinzo.safetensors",
+        "trigger_word": ", Shinzo Abe"
+    },
+    {
+        "image": "https://huggingface.co/p1atdev/flux.1-schnell-pvc-style-lora/resolve/main/images/flux_lora_00221_.png",
+        "title": "FLUX.1 schnell PVC style",
+        "repo": "p1atdev/flux.1-schnell-pvc-style-lora",
+        "weights": "pvc-shnell-7250+7500.safetensors",
+        "trigger_word": ", pvc figure"
+    },
+    {
+        "image": "https://cdn-uploads.huggingface.co/production/uploads/64b24543eec33e27dc9a6eca/sCvp6zDbBTXBEHh3ZSav6.png",
+        "title": "Flux Pastel Anime",
+        "repo": "Raelina/Flux-Pastel-Anime",
+        "weights": "lora_pastel_anime_flux.safetensors",
+        "trigger_word": ""
+    },
+    {
+        "image": "https://huggingface.co/wavymulder/OverlordStyleFLUX/resolve/main/imgs/ComfyUI_00725_.png",
+        "title": "Overlord Style FLUX",
+        "repo": "wavymulder/OverlordStyleFLUX",
+        "weights": "ovld_style_overlord_wavymulder.safetensors",
+        "trigger_word": ", ovld style anime"
+    },
+    {
+        "image": "https://huggingface.co/multimodalart/flux-tarot-v1/resolve/main/images/e5f2761e5d474e6ba492d20dca0fa26f_e78f1524074b42b6ac49643ffad50ac6.png",
+        "title": "Tarot v1",
+        "repo": "multimodalart/flux-tarot-v1",
+        "trigger_word": "in the style of TOK a trtcrd, tarot style",
+        "aspect": "portrait"
+    },
+    {
+        "image": "https://huggingface.co/alvdansen/softpasty-flux-dev/resolve/main/images/ComfyUI_00814_%20(2).png",
+        "title": "SoftPasty",
+        "repo": "alvdansen/softpasty-flux-dev",
+        "trigger_word": "araminta_illus illustration style"
+    },
+    {
+        "image": "https://huggingface.co/AIWarper/RubberCore1920sCartoonStyle/resolve/main/images/Rub_00006_.png",
+        "title": "1920s cartoon",
+        "repo": "AIWarper/RubberCore1920sCartoonStyle",
+        "trigger_word": "RU883R style",
+        "trigger_position": "prepend"
+    },
+    {
+        "image": "https://huggingface.co/mgwr/Cine-Aesthetic/resolve/main/images/00030-1333633802.png",
+        "title": "Cine Aesthetic",
+        "repo": "mgwr/Cine-Aesthetic",
+        "trigger_word": "mgwr/cine",
+        "trigger_position": "prepend"
+    },
+    {
+        "image": "https://huggingface.co/Shakker-Labs/FLUX.1-dev-LoRA-blended-realistic-illustration/resolve/main/images/example3.png",
+        "title": "Blended Realistic Illustration",
+        "repo": "Shakker-Labs/FLUX.1-dev-LoRA-blended-realistic-illustration",
+        "trigger_word": "artistic style blends reality and illustration elements"
+    },
+    {
+        "image": "https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/picture-6-rev1.png?raw=true",
+        "title": "flux-Realism",
+        "repo": "XLabs-AI/flux-RealismLora",
+        "trigger_word": ""
+    },
+    {
+        "image": "https://huggingface.co/multimodalart/vintage-ads-flux/resolve/main/samples/j_XNU6Oe0mgttyvf9uPb3_dc244dd3d6c246b4aff8351444868d66.png",
+        "title": "Vintage Ads",
+        "repo":"multimodalart/vintage-ads-flux",
+        "trigger_word": "a vintage ad of",
+        "trigger_position": "prepend"
+    },
+    {
+        "image": "https://huggingface.co/nerijs/animation2k-flux/resolve/main/images/Q8-oVxNnXvZ9HNrgbNpGw_02762aaaba3b47859ee5fe9403a371e3.png",
+        "title": "animation2k",
+        "repo": "nerijs/animation2k-flux",
+        "trigger_word": ""
+    },
+    {
+        "image":"https://huggingface.co/alvdansen/softserve_anime/resolve/main/images/ComfyUI_00062_.png",
+        "title":"SoftServe Anime",
+        "repo": "alvdansen/softserve_anime",
+        "trigger_word": ""
+    },
+    {
+        "image": "https://huggingface.co/veryVANYA/ps1-style-flux/resolve/main/24439220.jpeg",
+        "title": "PS1 style",
+        "repo": "veryVANYA/ps1-style-flux",
+        "trigger_word": "ps1 game screenshot"
+    },
+    {
+        "image": "https://huggingface.co/alvdansen/flux-koda/resolve/main/images/ComfyUI_00566_%20(2).png",
+        "title": "flux koda",
+        "repo": "alvdansen/flux-koda",
+        "trigger_word": "flmft style"
+    },
+    {
+        "image": "https://huggingface.co/alvdansen/frosting_lane_flux/resolve/main/images/content%20-%202024-08-11T005936.346.jpeg",
+        "title": "Frosting Lane Flux",
+        "repo": "alvdansen/frosting_lane_flux",
+        "trigger_word": ""
+    },
+    {
+        "image": "https://pbs.twimg.com/media/GU7NsZPa8AA4Ddl?format=jpg&name=4096x4096",
+        "title": "Half Illustration",
+        "repo": "davisbro/half_illustration",
+        "trigger_word": "in the style of TOK"
+    },
+    {
+        "image":"https://pbs.twimg.com/media/GVRiSH7WgAAnI4P?format=jpg&name=medium",
+        "title":"wrong",
+        "repo": "fofr/flux-wrong",
+        "trigger_word": "WRNG"
+    },
+    {
+        "image":"https://huggingface.co/linoyts/yarn_art_Flux_LoRA/resolve/main/yarn_art_2.png",
+        "title":"Yarn Art",
+        "repo": "linoyts/yarn_art_Flux_LoRA",
+        "trigger_word": ", yarn art style"
+    },
+    {
+        "image": "https://huggingface.co/Norod78/Flux_1_Dev_LoRA_Paper-Cutout-Style/resolve/main/08a19840b6214b76b0607b2f9d5a7e28_63159b9d98124c008efb1d36446a615c.png",
+        "title": "Paper Cutout",
+        "repo": "Norod78/Flux_1_Dev_LoRA_Paper-Cutout-Style",
+        "trigger_word": ", Paper Cutout Style"
+    },
+    {
+        "image": "https://huggingface.co/SebastianBodza/flux_lora_aquarel_watercolor/resolve/main/images/ascend.webp",
+        "title": "Aquarell Watercolor",
+        "repo": "SebastianBodza/Flux_Aquarell_Watercolor_v2",
+        "trigger_word": "in a watercolor style, AQUACOLTOK. White background."
+    },
+    {
+        "image": "https://huggingface.co/dataautogpt3/FLUX-SyntheticAnime/resolve/main/assets/angel.png",
+        "title": "SyntheticAnime",
+        "repo": "dataautogpt3/FLUX-SyntheticAnime",
+        "trigger_word": "1980s anime screengrab, VHS quality"
+    },
+    {
+        "image": "https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/result_14.png?raw=true",
+        "title": "flux-anime",
+        "repo": "XLabs-AI/flux-lora-collection",
+        "weights": "anime_lora.safetensors",
+        "trigger_word": ", anime"
+    },
+    {
+        "image": "https://replicate.delivery/yhqm/QD8Ioy5NExqSCtBS8hG04XIRQZFaC9pxJemINT1bibyjZfSTA/out-0.webp",
+        "title": "80s Cyberpunk",
+        "repo": "fofr/flux-80s-cyberpunk",
+        "trigger_word": "style of 80s cyberpunk",
+        "trigger_position": "prepend"
+    },
+    {
+        "image": "https://huggingface.co/kudzueye/Boreal/resolve/main/images/ComfyUI_00845_.png",
+        "title": "Boreal",
+        "repo": "kudzueye/Boreal",
+        "weights": "boreal-flux-dev-lora-v04_1000_steps.safetensors",
+        "trigger_word": "phone photo"
+    },
+    {
+        "image": "https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/result_18.png?raw=true",
+        "title": "flux-disney",
+        "repo": "XLabs-AI/flux-lora-collection",
+        "weights": "disney_lora.safetensors",
+        "trigger_word": ", disney style"
+    },
+    {
+        "image": "https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/result_23.png?raw=true",
+        "title": "flux-art",
+        "repo": "XLabs-AI/flux-lora-collection",
+        "weights": "art_lora.safetensors",
+        "trigger_word": ", art"
+    },
+    {
+        "image": "https://huggingface.co/martintomov/retrofuturism-flux/resolve/main/images/2e40deba-858e-454f-ae1c-d1ba2adb6a65.jpeg",
+        "title": "Retrofuturism Flux",
+        "repo": "martintomov/retrofuturism-flux",
+        "trigger_word": ", retrofuturism"
+    }
 ]
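
The schema is unchanged by this commit: each entry carries `image` (preview URL), `title`, `repo` (Hub id), and `trigger_word`, plus optional `weights` (a specific .safetensors file within the repo) and `trigger_position`. A rough sketch of how one entry could be consumed (hypothetical glue code; the actual loading and prompt assembly live in app.py):

import json

with open("loras.json", "r") as f:
    loras = json.load(f)

entry = loras[0]
# `pipe` is assumed to be the diffusers pipeline built in app.py.
pipe.load_lora_weights(entry["repo"], weight_name=entry.get("weights"))

prompt = "a portrait photo"
# Assumption based on the field names: "prepend" puts the trigger before the
# prompt; otherwise the trigger (often starting with ", ") is appended.
if entry.get("trigger_position") == "prepend":
    prompt = f"{entry['trigger_word']} {prompt}"
else:
    prompt = f"{prompt}{entry['trigger_word']}"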