multimodalart committed
Commit d06267b
1 Parent(s): 42015e6

Update app.py

Files changed (1):
  1. app.py +53 -103

app.py CHANGED
@@ -49,6 +49,10 @@ with open("sdxl_loras.json", "r") as file:
     for item in data
 ]
 
+with open("defaults_data.json", "r") as file:
+    lora_defaults = json.load(file)
+
+
 device = "cuda"
 
 state_dicts = {}
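The new `defaults_data.json` is read once at startup into `lora_defaults` and then iterated as a list of per-model dicts further down in this diff. A minimal sketch of the shape the rest of the commit appears to expect; the field names come from how `lora_defaults` is consumed in `update_selection()` and `run_lora()` below, while the repo name and values are made-up placeholders:

```python
import json

# Hypothetical example of the structure defaults_data.json appears to use:
# a list of per-model dicts. Field names are taken from how lora_defaults is
# read later in this diff; the values are placeholders, not real defaults.
example_defaults = [
    {
        "model": "some-user/some-sdxl-lora",   # must equal the "repo" field in sdxl_loras.json
        "weight": 0.9,                          # LoRA weight slider default
        "face_strength": 0.85,                  # face strength slider default
        "image_strength": 0.15,                 # image strength slider default
        "depth_control_scale": 0.8,             # Zoe Depth ControlNet slider default
        "prompt": "a portrait of <subject>",    # optional template; <subject> is replaced by the user prompt
        "negative": "blurry, low quality",      # optional negative-prompt default
    }
]

# The commit loads the real file the same way:
# with open("defaults_data.json", "r") as file:
#     lora_defaults = json.load(file)
print(json.dumps(example_defaults, indent=2))
```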
@@ -131,49 +135,20 @@ button.addEventListener('click', function() {
     element.classList.add('selected');
 });
 '''
-def update_selection(selected_state: gr.SelectData, sdxl_loras, is_new=False):
+def update_selection(selected_state: gr.SelectData, sdxl_loras, face_strength, image_strength, weight, depth_control_scale, negative, is_new=False):
     lora_repo = sdxl_loras[selected_state.index]["repo"]
-    instance_prompt = sdxl_loras[selected_state.index]["trigger_word"]
-    new_placeholder = "Type a prompt. This LoRA applies for all prompts, no need for a trigger word" if instance_prompt == "" else "Type a prompt to use your selected LoRA"
+    new_placeholder = "Type a prompt to use your selected LoRA"
     weight_name = sdxl_loras[selected_state.index]["weights"]
     updated_text = f"### Selected: [{lora_repo}](https://huggingface.co/{lora_repo}) ✨ {'(non-commercial LoRA, `cc-by-nc`)' if sdxl_loras[selected_state.index]['is_nc'] else '' }"
-    is_compatible = sdxl_loras[selected_state.index]["is_compatible"]
-    is_pivotal = sdxl_loras[selected_state.index]["is_pivotal"]
-
-    use_with_diffusers = f'''
-    ## Using [`{lora_repo}`](https://huggingface.co/{lora_repo})
-
-    ## Use it with diffusers:
-    '''
-    if is_compatible:
-        use_with_diffusers += f'''
-        from diffusers import StableDiffusionXLPipeline
-        import torch
-
-        model_path = "stabilityai/stable-diffusion-xl-base-1.0"
-        pipe = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=torch.float16)
-        pipe.to("cuda")
-        pipe.load_lora_weights("{lora_repo}", weight_name="{weight_name}")
-
-        prompt = "{instance_prompt}..."
-        lora_scale= 0.9
-        image = pipe(prompt, num_inference_steps=30, guidance_scale=7.5, cross_attention_kwargs={{"scale": lora_scale}}).images[0]
-        image.save("image.png")
-        '''
-    elif not is_pivotal:
-        use_with_diffusers += "This LoRA is not compatible with diffusers natively yet. But you can still use it on diffusers with `bmaltais/kohya_ss` LoRA class, check out this [Google Colab](https://colab.research.google.com/drive/14aEJsKdEQ9_kyfsiV6JDok799kxPul0j )"
-    else:
-        use_with_diffusers += f"This LoRA is not compatible with diffusers natively yet. But you can still use it on diffusers with sdxl-cog `TokenEmbeddingsHandler` class, check out the [model repo](https://huggingface.co/{lora_repo}#inference-with-🧨-diffusers)"
-    use_with_uis = f'''
-    ## Use it with Comfy UI, Invoke AI, SD.Next, AUTO1111:
 
-    ### Download the `*.safetensors` weights of [here](https://huggingface.co/{lora_repo}/resolve/main/{weight_name})
+    for lora_list in lora_defaults:
+        if lora_list["model"] == sdxl_loras[selected_state.index]["repo"]:
+            face_strength = lora_list.get("face_strength", face_strength)
+            image_strength = lora_list.get("image_strength", image_strength)
+            weight = lora_list.get("weight", weight)
+            depth_control_scale = lora_list.get("depth_control_scale", depth_control_scale)
+            negative = lora_list.get("negative", negative)
 
-    - [ComfyUI guide](https://comfyanonymous.github.io/ComfyUI_examples/lora/)
-    - [Invoke AI guide](https://invoke-ai.github.io/InvokeAI/features/CONCEPTS/?h=lora#using-loras)
-    - [SD.Next guide](https://github.com/vladmandic/automatic)
-    - [AUTOMATIC1111 guide](https://stable-diffusion-art.com/lora/)
-    '''
     if(is_new):
         if(selected_state.index == 0):
            selected_state.index = -9999
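The hunk above overlays per-model defaults on top of whatever the sliders currently hold, falling back to the incoming values whenever a key is absent. A self-contained sketch of that override pattern with made-up data (the helper name and the entries are hypothetical):

```python
# Standalone sketch of the defaults-override logic added to update_selection():
# current UI values are kept unless the matching lora_defaults entry overrides them.
def apply_lora_defaults(repo, lora_defaults, face_strength, image_strength,
                        weight, depth_control_scale, negative):
    for lora_list in lora_defaults:
        if lora_list["model"] == repo:
            face_strength = lora_list.get("face_strength", face_strength)
            image_strength = lora_list.get("image_strength", image_strength)
            weight = lora_list.get("weight", weight)
            depth_control_scale = lora_list.get("depth_control_scale", depth_control_scale)
            negative = lora_list.get("negative", negative)
    return face_strength, image_strength, weight, depth_control_scale, negative

# Hypothetical data: only "weight" and "negative" are overridden, the rest fall through.
defaults = [{"model": "user/lora-a", "weight": 1.2, "negative": "cartoon"}]
print(apply_lora_defaults("user/lora-a", defaults, 0.85, 0.15, 0.9, 0.8, ""))
# -> (0.85, 0.15, 1.2, 0.8, 'cartoon')
```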
@@ -182,24 +157,23 @@ def update_selection(selected_state: gr.SelectData, sdxl_loras, is_new=False):
 
     return (
         updated_text,
-        instance_prompt,
         gr.update(placeholder=new_placeholder),
-        selected_state,
-        use_with_diffusers,
-        use_with_uis,
-        gr.Gallery(selected_index=None)
+        face_strength,
+        image_strength,
+        weight,
+        depth_control_scale,
+        negative,
+        selected_state
     )
 
 def center_crop_image_as_square(img):
-    square_size = min(img.size) # Use the smaller dimension of the image
+    square_size = min(img.size)
 
-    # Calculate the coordinates of the crop box
     left = (img.width - square_size) / 2
     top = (img.height - square_size) / 2
     right = (img.width + square_size) / 2
     bottom = (img.height + square_size) / 2
 
-    # Perform the crop
     img_cropped = img.crop((left, top, right, bottom))
     return img_cropped
 
@@ -230,13 +204,21 @@ def merge_incompatible_lora(full_path_lora, lora_scale):
     del lora_model
     gc.collect()
 
-def run_lora(face_image, prompt, negative, lora_scale, selected_state, face_strength, image_strength, guidance_scale, depth_control_scale, sdxl_loras, sdxl_loras_new, progress=gr.Progress(track_tqdm=True)):
+def run_lora(face_image, prompt, negative, lora_scale, selected_state, face_strength, image_strength, guidance_scale, depth_control_scale, sdxl_loras, progress=gr.Progress(track_tqdm=True)):
     global last_lora, last_merged, last_fused, pipe
     face_info = app.get(cv2.cvtColor(np.array(face_image), cv2.COLOR_RGB2BGR))
     face_info = sorted(face_info, key=lambda x:(x['bbox'][2]-x['bbox'][0])*x['bbox'][3]-x['bbox'][1])[-1] # only use the maximum face
     face_emb = face_info['embedding']
     face_kps = draw_kps(face_image, face_info['kps'])
 
+    for lora_list in lora_defaults:
+        if lora_list["model"] == sdxl_loras[selected_state.index]["repo"]:
+            prompt_full = lora_list.get("prompt", None)
+            if(prompt_full):
+                prompt = prompt_full.replace("<subject>", prompt)
+
+
+    print("Prompt:", prompt)
     #prepare face zoe
     with torch.no_grad():
         image_zoe = zoe(face_image)
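The prompt handling added here is plain string substitution: if the matching `lora_defaults` entry defines a `prompt` template, the user's text replaces the `<subject>` token; otherwise the user prompt is used as-is. A small sketch with hypothetical values:

```python
# Hypothetical lora_defaults entry and user prompt; mirrors the substitution
# added to run_lora() above.
lora_entry = {"model": "user/lora-a", "prompt": "a watercolor painting of <subject>, soft light"}
user_prompt = "a person with short hair"

prompt_full = lora_entry.get("prompt", None)
if prompt_full:
    prompt = prompt_full.replace("<subject>", user_prompt)
else:
    prompt = user_prompt

print(prompt)
# -> a watercolor painting of a person with short hair, soft light
```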
@@ -245,12 +227,12 @@ def run_lora(face_image, prompt, negative, lora_scale, selected_state, face_stre
     images = [face_kps, image_zoe.resize((height, width))]
 
 
-    if(selected_state.index < 0):
-        if(selected_state.index == -9999):
-            selected_state.index = 0
-        else:
-            selected_state.index *= -1
-        sdxl_loras = sdxl_loras_new
+    #if(selected_state.index < 0):
+    #    if(selected_state.index == -9999):
+    #        selected_state.index = 0
+    #    else:
+    #        selected_state.index *= -1
+    #sdxl_loras = sdxl_loras_new
     print("Selected State: ", selected_state.index)
     print(sdxl_loras[selected_state.index]["repo"])
     if negative == "":
@@ -342,7 +324,11 @@ with gr.Blocks(css="custom.css") as demo:
             photo = gr.Image(label="Upload a picture of yourself", interactive=True, type="pil")
             selected_loras = gr.Gallery(label="Selected LoRAs", height=80, show_share_button=False, visible=False, elem_id="gallery_selected", )
             order_gallery = gr.Radio(choices=["random", "likes"], value="random", label="Order by", elem_id="order_radio")
-            new_gallery = gr.Gallery(label="New LoRAs", elem_id="gallery_new", columns=3, value=[(item["image"], item["title"]) for item in sdxl_loras_raw_new], allow_preview=False, show_share_button=False)
+            #new_gallery = gr.Gallery(
+            #    label="New LoRAs",
+            #    elem_id="gallery_new",
+            #    columns=3,
+            #    value=[(item["image"], item["title"]) for item in sdxl_loras_raw_new], allow_preview=False, show_share_button=False)
             gallery = gr.Gallery(
                 #value=[(item["image"], item["title"]) for item in sdxl_loras],
                 label="SDXL LoRA Gallery",
@@ -359,7 +345,7 @@ with gr.Blocks(css="custom.css") as demo:
                 elem_id="selected_lora",
             )
             with gr.Row():
-                prompt = gr.Textbox(label="Prompt", show_label=False, lines=1, max_lines=1, placeholder="Type a prompt after selecting a LoRA", elem_id="prompt")
+                prompt = gr.Textbox(label="Prompt", show_label=False, lines=1, max_lines=1, placeholder="A person", elem_id="prompt")
                 button = gr.Button("Run", elem_id="run_button")
             with gr.Group(elem_id="share-btn-container", visible=False) as share_group:
                 community_icon = gr.HTML(community_icon_html)
@@ -375,43 +361,7 @@ with gr.Blocks(css="custom.css") as demo:
                     weight = gr.Slider(0, 10, value=0.9, step=0.1, label="LoRA weight")
                     guidance_scale = gr.Slider(0, 50, value=7, step=0.1, label="Guidance Scale")
                     depth_control_scale = gr.Slider(0, 1, value=0.8, step=0.01, label="Zoe Depth ControlNet strenght")
-        with gr.Column(elem_id="extra_info"):
-            with gr.Accordion(
-                "Use it with: 🧨 diffusers, ComfyUI, Invoke AI, SD.Next, AUTO1111",
-                open=False,
-                elem_id="accordion",
-            ):
-                with gr.Row():
-                    use_diffusers = gr.Markdown("""## Select a LoRA first 🤗""")
-                    use_uis = gr.Markdown()
-            with gr.Accordion("Submit a LoRA! 📥", open=False):
-                submit_title = gr.Markdown(
-                    "### Streamlined submission coming soon! Until then [suggest your LoRA in the community tab](https://huggingface.co/spaces/multimodalart/LoraTheExplorer/discussions) 🤗"
-                )
-                with gr.Group(elem_id="soon"):
-                    submit_source = gr.Radio(
-                        ["Hugging Face", "CivitAI"],
-                        label="LoRA source",
-                        value="Hugging Face",
-                    )
-                    with gr.Row():
-                        submit_source_hf = gr.Textbox(
-                            label="Hugging Face Model Repo",
-                            info="In the format `username/model_id`",
-                        )
-                        submit_safetensors_hf = gr.Textbox(
-                            label="Safetensors filename",
-                            info="The filename `*.safetensors` in the model repo",
-                        )
-                    with gr.Row():
-                        submit_trigger_word_hf = gr.Textbox(label="Trigger word")
-                        submit_image = gr.Image(
-                            label="Example image (optional if the repo already contains images)"
-                        )
-                    submit_button = gr.Button("Submit!")
-                submit_disclaimer = gr.Markdown(
-                    "This is a curated gallery by me, [apolinário (multimodal.art)](https://twitter.com/multimodalart). I'll try to include as many cool LoRAs as they are submitted! You can [duplicate this Space](https://huggingface.co/spaces/multimodalart/LoraTheExplorer?duplicate=true) to use it privately, and add your own LoRAs by editing `sdxl_loras.json` in the Files tab of your private space."
-                )
+
     order_gallery.change(
         fn=swap_gallery,
         inputs=[order_gallery, gr_sdxl_loras],
@@ -420,18 +370,18 @@ with gr.Blocks(css="custom.css") as demo:
     )
     gallery.select(
         fn=update_selection,
-        inputs=[gr_sdxl_loras],
-        outputs=[prompt_title, prompt, prompt, selected_state, use_diffusers, use_uis, new_gallery],
-        queue=False,
-        show_progress=False
-    )
-    new_gallery.select(
-        fn=update_selection,
-        inputs=[gr_sdxl_loras_new, gr.State(True)],
-        outputs=[prompt_title, prompt, prompt, selected_state, use_diffusers, use_uis, gallery],
+        inputs=[gr_sdxl_loras, face_strength, image_strength, weight, depth_control_scale, negative],
+        outputs=[prompt_title, prompt, face_strength, image_strength, weight, depth_control_scale, negative, selected_state],
         queue=False,
         show_progress=False
     )
+    #new_gallery.select(
+    #    fn=update_selection,
+    #    inputs=[gr_sdxl_loras_new, gr.State(True)],
+    #    outputs=[prompt_title, prompt, prompt, selected_state, gallery],
+    #    queue=False,
+    #    show_progress=False
+    #)
     prompt.submit(
         fn=check_selected,
         inputs=[selected_state],
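In the rewired `gallery.select` above, the `outputs` list has to line up positionally with the tuple `update_selection` now returns (title text, prompt placeholder update, the four slider values, the negative prompt, then the selected state). A minimal, hypothetical Gradio snippet illustrating that contract, with sliders used as both inputs and outputs the way the app does:

```python
import gradio as gr

# Hypothetical components; the point is only the positional contract:
# the i-th value returned by the handler is written to the i-th output.
def on_select(evt: gr.SelectData, face_strength, weight):
    # pretend entry 0 overrides the weight only
    overrides = {0: {"weight": 1.2}}
    o = overrides.get(evt.index, {})
    return f"Selected item #{evt.index}", o.get("face_strength", face_strength), o.get("weight", weight)

with gr.Blocks() as demo:
    gallery = gr.Gallery(value=["https://picsum.photos/200", "https://picsum.photos/201"])  # placeholder images
    title = gr.Markdown()
    face_strength = gr.Slider(0, 1, value=0.85, label="Face strength")
    weight = gr.Slider(0, 10, value=0.9, label="LoRA weight")
    gallery.select(fn=on_select, inputs=[face_strength, weight],
                   outputs=[title, face_strength, weight], queue=False, show_progress=False)

# demo.launch()  # uncomment to run locally
```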
@@ -445,7 +395,7 @@ with gr.Blocks(css="custom.css") as demo:
         show_progress=False,
     ).success(
         fn=run_lora,
-        inputs=[photo, prompt, negative, weight, selected_state, face_strength, image_strength, guidance_scale, depth_control_scale, gr_sdxl_loras, gr_sdxl_loras_new],
+        inputs=[photo, prompt, negative, weight, selected_state, face_strength, image_strength, guidance_scale, depth_control_scale, gr_sdxl_loras],
         outputs=[result, share_group],
     )
     button.click(
@@ -461,7 +411,7 @@ with gr.Blocks(css="custom.css") as demo:
         show_progress=False,
     ).success(
         fn=run_lora,
-        inputs=[photo, prompt, negative, weight, selected_state, face_strength, image_strength, guidance_scale, depth_control_scale, gr_sdxl_loras, gr_sdxl_loras_new],
+        inputs=[photo, prompt, negative, weight, selected_state, face_strength, image_strength, guidance_scale, depth_control_scale, gr_sdxl_loras],
         outputs=[result, share_group],
     )
     share_button.click(None, [], [], js=share_js)
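Because every `lora_defaults` entry is matched to a gallery item by exact string comparison against `sdxl_loras[...]["repo"]`, an entry whose `model` value does not match any repo is silently ignored. A small, hypothetical consistency check between the two JSON files (the helper name is made up; the file names come from this diff):

```python
import json

# Hypothetical helper: warn about defaults entries whose "model" does not match
# any "repo" in sdxl_loras.json — such entries never take effect in the app.
def check_defaults(sdxl_loras_path="sdxl_loras.json", defaults_path="defaults_data.json"):
    with open(sdxl_loras_path, "r") as f:
        repos = {item["repo"] for item in json.load(f)}
    with open(defaults_path, "r") as f:
        defaults = json.load(f)
    missing = [d["model"] for d in defaults if d["model"] not in repos]
    for model in missing:
        print(f"warning: defaults entry '{model}' has no matching repo in sdxl_loras.json")
    return missing

# check_defaults()  # run from the Space root
```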
 