Commit 8741760 • 1 Parent(s): e2bdec1 • Update app.py
app.py CHANGED
@@ -207,7 +207,12 @@ def merge_incompatible_lora(full_path_lora, lora_scale):
     del lora_model
     gc.collect()
 @spaces.GPU
-def generate_image(conditioning, pooled, negative_conditioning, negative_pooled, face_emb, face_image, image_strength, images, guidance_scale, face_strength, depth_control_scale):
+def generate_image(prompt, negative, face_emb, face_image, image_strength, images, guidance_scale, face_strength, depth_control_scale):
+    conditioning, pooled = compel(prompt)
+    if(negative):
+        negative_conditioning, negative_pooled = compel(negative)
+    else:
+        negative_conditioning, negative_pooled = None, None
     image = pipe(
         prompt_embeds=conditioning,
         pooled_prompt_embeds=pooled,
@@ -300,13 +305,7 @@ def run_lora(face_image, prompt, negative, lora_scale, selected_state, face_stre
     pipe.load_textual_inversion(state_dict_embedding["text_encoders_0"], token=["<s0>", "<s1>"], text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer)
     pipe.load_textual_inversion(state_dict_embedding["text_encoders_1"], token=["<s0>", "<s1>"], text_encoder=pipe.text_encoder_2, tokenizer=pipe.tokenizer_2)
 
-
-    if(negative):
-        negative_conditioning, negative_pooled = compel(negative)
-    else:
-        negative_conditioning, negative_pooled = None, None
-
-    image = generate_image(conditioning, pooled, negative_conditioning, negative_pooled, face_emb, face_image, image_strength, images, guidance_scale, face_strength, depth_control_scale)
+    image = generate_image(prompt, negative, face_emb, face_image, image_strength, images, guidance_scale, face_strength, depth_control_scale)
     last_lora = repo_name
     gc.collect()
     return image, gr.update(visible=True)