multimodalart HF staff committed on
Commit
fb471fe
1 Parent(s): 71727de

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -1
app.py CHANGED
@@ -206,11 +206,13 @@ def merge_incompatible_lora(full_path_lora, lora_scale):
206
  del lora_model
207
  @spaces.GPU
208
  def generate_image(prompt, negative, face_emb, face_image, image_strength, images, guidance_scale, face_strength, depth_control_scale):
 
209
  conditioning, pooled = compel(prompt)
210
  if(negative):
211
  negative_conditioning, negative_pooled = compel(negative)
212
  else:
213
  negative_conditioning, negative_pooled = None, None
 
214
  image = pipe(
215
  prompt_embeds=conditioning,
216
  pooled_prompt_embeds=pooled,
@@ -283,7 +285,7 @@ def run_lora(face_image, prompt, negative, lora_scale, selected_state, face_stre
283
  if last_lora != repo_name:
284
  if(last_fused):
285
  pipe.unfuse_lora()
286
- pipe.unload_lora_weights()
287
  pipe.load_lora_weights(loaded_state_dict)
288
  pipe.fuse_lora(lora_scale)
289
  last_fused = True
 
206
  del lora_model
207
  @spaces.GPU
208
  def generate_image(prompt, negative, face_emb, face_image, image_strength, images, guidance_scale, face_strength, depth_control_scale):
209
+ print("Processing prompt...")
210
  conditioning, pooled = compel(prompt)
211
  if(negative):
212
  negative_conditioning, negative_pooled = compel(negative)
213
  else:
214
  negative_conditioning, negative_pooled = None, None
215
+ print("Processing image...")
216
  image = pipe(
217
  prompt_embeds=conditioning,
218
  pooled_prompt_embeds=pooled,
 
285
  if last_lora != repo_name:
286
  if(last_fused):
287
  pipe.unfuse_lora()
288
+ pipe.unload_lora_weights()
289
  pipe.load_lora_weights(loaded_state_dict)
290
  pipe.fuse_lora(lora_scale)
291
  last_fused = True