from diffusers import DiffusionPipeline, UNet2DConditionModel, LCMScheduler
from huggingface_hub import hf_hub_download
import spaces
import gradio as gr
import torch

# Constants
base = "stabilityai/stable-diffusion-xl-base-1.0"
repo = "tianweiy/DMD2"
checkpoints = {
    "1-Step": ["dmd2_sdxl_1step_unet.bin", 1],
    "4-Step": ["dmd2_sdxl_4step_unet.bin", 4],
}
loaded = None

CSS = """
.gradio-container {
    max-width: 690px !important;
}
"""

# Ensure model and scheduler are initialized in a GPU-enabled environment
if torch.cuda.is_available():
    unet = UNet2DConditionModel.from_config(base, subfolder="unet").to("cuda", torch.float16)
    pipe = DiffusionPipeline.from_pretrained(base, unet=unet, torch_dtype=torch.float16, variant="fp16").to("cuda")

# Function
@spaces.GPU()
def generate_image(prompt, ckpt):
    global loaded
    print(prompt, ckpt)
    checkpoint, num_inference_steps = checkpoints[ckpt]

    if loaded != num_inference_steps:
        # Swap in the UNet weights for the selected step count; the 1-step
        # checkpoint predicts the sample directly, the multi-step one epsilon.
        unet.load_state_dict(torch.load(hf_hub_download(repo, checkpoint), map_location="cuda"))
        pipe.scheduler = LCMScheduler.from_config(
            pipe.scheduler.config,
            timestep_spacing="trailing",
            prediction_type="sample" if num_inference_steps == 1 else "epsilon",
        )
        loaded = num_inference_steps

    results = pipe(prompt, num_inference_steps=num_inference_steps, guidance_scale=0)
    return results.images[0]
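# A minimal sketch (not part of the original Space) of calling the handler
# directly, e.g. for a local smoke test without the Gradio UI; the prompt
# string and output filename below are illustrative assumptions:
#
#   image = generate_image("a cinematic photo of a corgi astronaut", "4-Step")
#   image.save("dmd2_sample.png")  # generate_image returns a PIL.Image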
# Gradio Interface
with gr.Blocks(css=CSS) as demo:
    gr.HTML("<h1><center>Adobe DMD2🦖</center></h1>")
    gr.HTML("<p><center>DMD2 text-to-image generation</center></p>")
") with gr.Group(): with gr.Row(): prompt = gr.Textbox(label='Enter your prompt (English)', scale=8) ckpt = gr.Dropdown(label='Select inference steps',choices=['1-Step', '2-Step', '4-Step', '8-Step'], value='4-Step', interactive=True) submit = gr.Button(scale=1, variant='primary') img = gr.Image(label='DMD2 Generated Image') prompt.submit(fn=generate_image, inputs=[prompt, ckpt], outputs=img, ) submit.click(fn=generate_image, inputs=[prompt, ckpt], outputs=img, ) demo.queue().launch()