Update LoRA loading
app.py CHANGED
@@ -58,9 +58,7 @@ if TORCH_COMPILE:
 
 # Load LCM LoRA
 pipe.load_lora_weights(
-    "
-    weight_name="lcm_sdxl_lora.safetensors",
-    #adapter_name="lcm",
+    "latent-consistency/lcm-lora-sdxl",
     use_auth_token=HF_TOKEN,
 )
 
@@ -144,7 +142,7 @@ with gr.Blocks(css=css) as demo:
 from diffusers import DiffusionPipeline, LCMScheduler
 pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0").to("cuda")
 pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
-pipe.load_lora_weights("
+pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl") #yes, it's a normal LoRA
 
 results = pipe(
 prompt="The spirit of a tamagotchi wandering in the city of Vienna",
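For context, a minimal self-contained sketch of the loading path this commit switches to. The model ID, adapter repo, scheduler, and prompt come from the diff itself; torch_dtype, step count, and guidance scale are assumptions based on typical LCM-LoRA usage, not values taken from app.py.

# Sketch of the updated LCM-LoRA loading shown in this diff.
# Assumptions: diffusers >= 0.23, a CUDA GPU, fp16 weights; the generation
# parameters (4 steps, guidance_scale=1.0) are common LCM-LoRA settings,
# not values read from app.py.
import torch
from diffusers import DiffusionPipeline, LCMScheduler

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,  # assumption: fp16 to fit on a single GPU
).to("cuda")

# Swap in the LCM scheduler and load the LCM LoRA straight from the Hub repo,
# as the new lines do (no weight_name argument needed).
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl")

image = pipe(
    prompt="The spirit of a tamagotchi wandering in the city of Vienna",
    num_inference_steps=4,
    guidance_scale=1.0,
).images[0]
image.save("tamagotchi_vienna.png")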