Commit aa6b3a7
Parent(s): c51e24b
Update app.py

app.py CHANGED
@@ -52,6 +52,7 @@ sdxl_loras_raw_new = [item for item in sdxl_loras_raw if item.get("new") == True
 
 sdxl_loras_raw = [item for item in sdxl_loras_raw if item.get("new") != True]
 
+lcm_lora_id = "lcm-sd/lcm-sdxl-base-1.0-lora"
 
 vae = AutoencoderKL.from_pretrained(
     "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
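The first hunk only defines the new lcm_lora_id constant; this commit's hunk does not show how the id gets wired into the pipeline. For context, a minimal sketch of the usual way an LCM LoRA like this is used with diffusers (the LCMScheduler swap and the few-step, low-guidance settings are the standard LCM recipe, assumed here rather than taken from this commit):

import torch
from diffusers import DiffusionPipeline, LCMScheduler

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")

# LCM-distilled LoRAs are meant to run with the LCM scheduler
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

lcm_lora_id = "lcm-sd/lcm-sdxl-base-1.0-lora"  # the id added in this commit
pipe.load_lora_weights(lcm_lora_id)
pipe.fuse_lora()

# LCM works with very few steps and little to no classifier-free guidance
image = pipe("a photo of a dog", num_inference_steps=4, guidance_scale=1.0).images[0]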
@@ -184,39 +185,20 @@ def run_lora(prompt, negative, lora_scale, selected_state, sdxl_loras, sdxl_lora
     loaded_state_dict = copy.deepcopy(state_dicts[repo_name]["state_dict"])
     cross_attention_kwargs = None
     if last_lora != repo_name:
-        if(last_merged):
-            del pipe
-            gc.collect()
-            pipe = copy.deepcopy(original_pipe)
-            pipe.to(device)
-        elif(last_fused):
+        if(last_fused):
             pipe.unfuse_lora()
-
-
-
-
-
-
-
-
-
-
-
-
-                last_fused = True
-
-                #Add the textual inversion embeddings from pivotal tuning models
-                text_embedding_name = sdxl_loras[selected_state.index]["text_embedding_weights"]
-                text_encoders = [pipe.text_encoder, pipe.text_encoder_2]
-                tokenizers = [pipe.tokenizer, pipe.tokenizer_2]
-                embedding_path = hf_hub_download(repo_id=repo_name, filename=text_embedding_name, repo_type="model")
-                embhandler = TokenEmbeddingsHandler(text_encoders, tokenizers)
-                embhandler.load_embeddings(embedding_path)
-
-            else:
-                merge_incompatible_lora(full_path_lora, lora_scale)
-                last_fused=False
-                last_merged = True
+        pipe.load_lora_weights(loaded_state_dict)
+        pipe.fuse_lora()
+        last_fused = True
+        is_pivotal = sdxl_loras[selected_state.index]["is_pivotal"]
+        if(is_pivotal):
+            #Add the textual inversion embeddings from pivotal tuning models
+            text_embedding_name = sdxl_loras[selected_state.index]["text_embedding_weights"]
+            text_encoders = [pipe.text_encoder, pipe.text_encoder_2]
+            tokenizers = [pipe.tokenizer, pipe.tokenizer_2]
+            embedding_path = hf_hub_download(repo_id=repo_name, filename=text_embedding_name, repo_type="model")
+            embhandler = TokenEmbeddingsHandler(text_encoders, tokenizers)
+            embhandler.load_embeddings(embedding_path)
 
     image = pipe(
         prompt=prompt,
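The rewritten branch replaces the old delete-and-deepcopy path with a single flow: unfuse whatever LoRA was fused last, load the new state dict, fuse it, and, for pivotal-tuning LoRAs, inject the trained token embeddings into both SDXL text encoders. A condensed sketch of that flow as a standalone helper (swap_lora is a hypothetical name; TokenEmbeddingsHandler is the Space's own pivotal-tuning helper and is assumed to be in scope):

from huggingface_hub import hf_hub_download
# TokenEmbeddingsHandler is defined in the Space's codebase, not in diffusers

def swap_lora(pipe, repo_name, state_dict, is_pivotal, text_embedding_name, last_fused):
    # Undo the previously fused LoRA so fuses do not stack up
    if last_fused:
        pipe.unfuse_lora()
    pipe.load_lora_weights(state_dict)  # load_lora_weights also accepts a state dict
    pipe.fuse_lora()  # bake the LoRA into the base weights for faster inference
    if is_pivotal:
        # Pivotal-tuning LoRAs ship trained token embeddings that have to be
        # loaded into both SDXL text encoders
        text_encoders = [pipe.text_encoder, pipe.text_encoder_2]
        tokenizers = [pipe.tokenizer, pipe.tokenizer_2]
        embedding_path = hf_hub_download(
            repo_id=repo_name, filename=text_embedding_name, repo_type="model"
        )
        embhandler = TokenEmbeddingsHandler(text_encoders, tokenizers)
        embhandler.load_embeddings(embedding_path)
    return True  # the caller stores this as the new last_fused

Compared with the removed code, this avoids rebuilding the whole pipeline (del pipe, gc.collect(), copy.deepcopy(original_pipe)) on every switch; unfuse_lora restores the original weights, so the same pipeline object can be reused across LoRA changes.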