multimodalart (HF staff) committed 645b9bf • 1 Parent(s): 3f3a00c

Update app.py

Files changed (1):
  1. app.py +17 -18
app.py CHANGED
@@ -128,7 +128,6 @@ zoe.to(device)
 pipe.to(device)
 
 last_lora = ""
-last_merged = False
 last_fused = False
 js = '''
 var button = document.getElementById('button');
@@ -205,8 +204,20 @@ def merge_incompatible_lora(full_path_lora, lora_scale):
     del weights_sd
     del lora_model
 @spaces.GPU
-def generate_image(prompt, negative, face_emb, face_image, image_strength, images, guidance_scale, face_strength, depth_control_scale, last_lora, repo_name, loaded_state_dict, lora_scale, sdxl_loras, selected_state_index):
-    global last_fused
+def generate_image(prompt, negative, face_emb, face_image, face_kps, image_strength, images, guidance_scale, face_strength, depth_control_scale, repo_name, loaded_state_dict, lora_scale, sdxl_loras, selected_state_index):
+    global last_fused, last_lora
+    print("Last LoRA: ", last_lora)
+    print("Current LoRA: ", repo_name)
+    print("Last fused: ", last_fused)
+    #prepare face zoe
+    st = time.time()
+    with torch.no_grad():
+        image_zoe = zoe(face_image)
+    width, height = face_kps.size
+    images = [face_kps, image_zoe.resize((height, width))]
+    et = time.time()
+    elapsed_time = et - st
+    print('Zoe Depth calculations took: ', elapsed_time, 'seconds')
     if last_lora != repo_name:
         if(last_fused):
             st = time.time()
@@ -259,10 +270,10 @@ def generate_image(prompt, negative, face_emb, face_image, image_strength, image
         guidance_scale = guidance_scale,
         controlnet_conditioning_scale=[face_strength, depth_control_scale],
     ).images[0]
+    last_lora = repo_name
     return image
 
 def run_lora(face_image, prompt, negative, lora_scale, selected_state, face_strength, image_strength, guidance_scale, depth_control_scale, sdxl_loras, progress=gr.Progress(track_tqdm=True)):
-    global last_lora, last_merged, last_fused, pipe
     selected_state_index = selected_state.index
     face_image = center_crop_image_as_square(face_image)
     st = time.time()
@@ -276,6 +287,7 @@ def run_lora(face_image, prompt, negative, lora_scale, selected_state, face_stre
     et = time.time()
     elapsed_time = et - st
     print('Calculating face embeds took: ', elapsed_time, 'seconds')
+
     for lora_list in lora_defaults:
         if lora_list["model"] == sdxl_loras[selected_state_index]["repo"]:
             prompt_full = lora_list.get("prompt", None)
@@ -286,15 +298,6 @@ def run_lora(face_image, prompt, negative, lora_scale, selected_state, face_stre
     print("Prompt:", prompt)
     if(prompt == ""):
         prompt = "a person"
-    #prepare face zoe
-    st = time.time()
-    with torch.no_grad():
-        image_zoe = zoe(face_image)
-    et = time.time()
-    elapsed_time = et - st
-    print('Zoe Depth calculations took: ', elapsed_time, 'seconds')
-    width, height = face_kps.size
-    images = [face_kps, image_zoe.resize((height, width))]
 
     #if(selected_state.index < 0):
     #    if(selected_state.index == -9999):
@@ -315,12 +318,8 @@ def run_lora(face_image, prompt, negative, lora_scale, selected_state, face_stre
     full_path_lora = state_dicts[repo_name]["saved_name"]
     loaded_state_dict = copy.deepcopy(state_dicts[repo_name]["state_dict"])
     cross_attention_kwargs = None
-    print("Last LoRA: ", last_lora)
-    print("Current LoRA: ", repo_name)
-    print("Last fused: ", last_fused)
 
-    image = generate_image(prompt, negative, face_emb, face_image, image_strength, images, guidance_scale, face_strength, depth_control_scale, last_lora, repo_name, loaded_state_dict, lora_scale, sdxl_loras, selected_state_index)
-    last_lora = repo_name
+    image = generate_image(prompt, negative, face_emb, face_image, face_kps, image_strength, images, guidance_scale, face_strength, depth_control_scale, repo_name, loaded_state_dict, lora_scale, sdxl_loras, selected_state_index)
     return image, gr.update(visible=True)
 
 def shuffle_gallery(sdxl_loras):
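Most of this change moves the Zoe depth preprocessing out of run_lora and into generate_image, which carries the @spaces.GPU decorator, and passes face_kps through so both ControlNet conditioning images are built there. Below is a minimal sketch of that pattern, assuming a ZeroGPU Space with the spaces package installed; prepare_depth_control is an illustrative name, and zoe is the module-level depth estimator that app.py loads further up.

```python
# Minimal sketch, not the app's exact generate_image; assumes the module-level
# `zoe` depth estimator and `torch` setup that app.py already has.
import time

import spaces
import torch


@spaces.GPU  # on ZeroGPU, the GPU is only attached while this call runs
def prepare_depth_control(face_image, face_kps):
    """Build the two ControlNet conditioning images inside the GPU call."""
    st = time.time()
    with torch.no_grad():               # inference only, no gradients kept
        image_zoe = zoe(face_image)     # depth map of the cropped face
    width, height = face_kps.size
    # keypoint image first, depth map second, both at the same resolution
    images = [face_kps, image_zoe.resize((height, width))]
    print("Zoe Depth calculations took:", time.time() - st, "seconds")
    return images
```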
 
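The commit also drops the last_merged flag, trims the global statement in run_lora, and lets generate_image own last_lora/last_fused, setting last_lora = repo_name only after the pipeline call returns. The sketch below shows the fuse-once caching idea with diffusers-style LoRA calls; ensure_lora is an illustrative helper, not the app's exact control flow, and the precise load/fuse calls in app.py may differ.

```python
# Minimal sketch of fuse-once LoRA caching, assuming a diffusers SDXL pipeline
# with LoRA support (`pipe`); names and call sites are illustrative.
last_lora = ""       # repo id of the LoRA currently fused into the pipeline
last_fused = False   # whether any LoRA is fused right now


def ensure_lora(pipe, repo_name, full_path_lora, lora_scale):
    """Swap LoRAs only when the requested repo differs from the last one."""
    global last_lora, last_fused
    if last_lora != repo_name:
        if last_fused:
            pipe.unfuse_lora()          # undo the previously fused weights
            pipe.unload_lora_weights()
        pipe.load_lora_weights(full_path_lora)
        pipe.fuse_lora(lora_scale=lora_scale)
        last_fused = True
        last_lora = repo_name           # record the swap once it succeeded
```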