Update app.py
app.py
CHANGED
@@ -17,11 +17,11 @@ controlnet = ControlNetModel.from_pretrained(
     "diffusers/controlnet-canny-sdxl-1.0",
     torch_dtype=torch.float16
 )
-vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
+#vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
 pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
     "stabilityai/stable-diffusion-xl-base-1.0",
     controlnet=controlnet,
-    vae=vae,
+    #vae=vae,
     torch_dtype=torch.float16,
     variant="fp16",
     use_safetensors=True
@@ -31,7 +31,7 @@ pipe.to("cuda")
 custom_model = "fffiloni/eugene_jour_general"
 
 # This is where you load your trained weights
-pipe.load_lora_weights(custom_model, use_auth_token=True)
+pipe.load_lora_weights(custom_model, weight_name="pytorch_lora_weights.safetensors", use_auth_token=True)
 
 #pipe.enable_model_cpu_offload()
 
@@ -55,7 +55,7 @@ def infer(image_in, prompt, controlnet_conditioning_scale, guidance_scale):
         prompt,
         negative_prompt=negative_prompt,
         image=image,
-
+        controlnet_conditioning_scale=controlnet_conditioning_scale,
         guidance_scale = guidance_scale,
         num_inference_steps=50,
         cross_attention_kwargs={"scale": lora_scale}
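Taken together, the update comments out the separate fp16-fix VAE, pins the LoRA checkpoint to pytorch_lora_weights.safetensors when loading fffiloni/eugene_jour_general, and forwards controlnet_conditioning_scale into the pipeline call. For context, below is a minimal sketch of how the updated pieces fit together as a standalone script; the imports, Canny preprocessing, prompt values, and lora_scale are assumptions, since the diff only shows the changed hunks.

# Minimal sketch assembled from the diff hunks above; imports, the Canny
# preprocessing, prompts, and lora_scale are assumptions, not part of the diff.
import cv2
import numpy as np
import torch
from PIL import Image
from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline

controlnet = ControlNetModel.from_pretrained(
    "diffusers/controlnet-canny-sdxl-1.0",
    torch_dtype=torch.float16,
)
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    controlnet=controlnet,
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True,
)
pipe.to("cuda")

# Load the custom LoRA, pinning the exact weights file as in this change.
# (The Space also passes use_auth_token=True for its private checkpoint.)
custom_model = "fffiloni/eugene_jour_general"
pipe.load_lora_weights(custom_model, weight_name="pytorch_lora_weights.safetensors")

def make_canny(image_path, low=100, high=200):
    # Assumed preprocessing: build a 3-channel Canny edge map for the ControlNet.
    edges = cv2.Canny(cv2.imread(image_path), low, high)
    return Image.fromarray(np.stack([edges] * 3, axis=-1))

image = make_canny("input.png")          # placeholder input image
lora_scale = 0.9                          # assumed LoRA strength

result = pipe(
    "a drawing in the style of eugene",   # assumed prompt
    negative_prompt="low quality, blurry",
    image=image,
    controlnet_conditioning_scale=0.5,    # now forwarded from the UI slider
    guidance_scale=7.5,
    num_inference_steps=50,
    cross_attention_kwargs={"scale": lora_scale},
).images[0]
result.save("output.png")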