import os

import cv2
import gradio as gr
import numpy as np
import torch
from diffusers import AutoencoderKL, ControlNetModel, StableDiffusionXLControlNetPipeline
from diffusers.utils import load_image
from huggingface_hub import login
from PIL import Image

# Authenticate against the Hugging Face Hub so gated/private weights can be downloaded.
hf_token = os.environ.get("HF_TOKEN")
login(token=hf_token)

# Canny ControlNet for SDXL, plus the fp16-safe VAE to avoid artifacts at half precision.
controlnet = ControlNetModel.from_pretrained(
    "diffusers/controlnet-canny-sdxl-1.0",
    torch_dtype=torch.float16
)
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    controlnet=controlnet,
    vae=vae,
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True
)
pipe.to("cuda")

# This is where you load your trained LoRA weights.
custom_model = "fffiloni/eugene_jour_general"
pipe.load_lora_weights(custom_model, use_auth_token=True)
# pipe.enable_model_cpu_offload()  # uncomment to trade speed for lower VRAM usage


def infer(image_in, prompt, controlnet_conditioning_scale, guidance_scale):
    negative_prompt = ""
    image = load_image(image_in)

    # Build the Canny edge map the ControlNet is conditioned on, then replicate
    # it to three channels because the pipeline expects an RGB control image.
    image = np.array(image)
    image = cv2.Canny(image, 100, 200)
    image = image[:, :, None]
    image = np.concatenate([image, image, image], axis=2)
    image = Image.fromarray(image)

    lora_scale = 0.9  # how strongly the LoRA weights influence the output

    # controlnet_conditioning_scale around 0.25 is recommended for good generalization.
    images = pipe(
        prompt,
        negative_prompt=negative_prompt,
        image=image,
        controlnet_conditioning_scale=float(controlnet_conditioning_scale),
        guidance_scale=guidance_scale,
        num_inference_steps=50,
        cross_attention_kwargs={"scale": lora_scale}
    ).images

    images[0].save("hug_lab.png")
    return "hug_lab.png"


with gr.Blocks() as demo:
    with gr.Column():
        image_in = gr.Image(source="upload", type="filepath")
        prompt = gr.Textbox(label="Prompt")
        guidance_scale = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=5.0)
        controlnet_conditioning_scale = gr.Slider(label="ControlNet conditioning scale", minimum=0.0, maximum=1.0, step=0.1, value=0.5)
        submit_btn = gr.Button("Submit")
        result = gr.Image(label="Result")
        submit_btn.click(
            fn=infer,
            inputs=[image_in, prompt, controlnet_conditioning_scale, guidance_scale],
            outputs=[result]
        )

demo.queue().launch()
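
# --- Optional: a minimal standalone sketch (not part of the app) for previewing
# the Canny edge map that infer() feeds to the ControlNet, useful when tuning
# the 100/200 thresholds. The path "input.png" is a hypothetical example input.
#
# import cv2
# import numpy as np
# from PIL import Image
#
# edges = cv2.Canny(np.array(Image.open("input.png").convert("RGB")), 100, 200)
# Image.fromarray(np.stack([edges] * 3, axis=2)).save("canny_preview.png")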