|
import gradio as gr |
|
import torch |
|
from diffusers import AnimateDiffPipeline, MotionAdapter, DDIMScheduler |
|
from diffusers.utils import export_to_gif |
|
import random |
|
|
|
# Cache for the (expensive) pipeline so model weights are loaded only once
# per process instead of on every Gradio request.
_PIPELINE_CACHE = {}


def _build_pipeline():
    """Construct the AnimateDiff pipeline with IP-Adapter support.

    Loads the SD1.5 motion adapter, the Realistic Vision base model, a
    DDIM scheduler configured for AnimateDiff, and the SD1.5 IP-Adapter
    weights. Enables VAE slicing and model CPU offload to keep VRAM use low.
    """
    adapter = MotionAdapter.from_pretrained(
        "guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16
    )

    model_id = "SG161222/Realistic_Vision_V6.0_B1_noVAE"
    pipe = AnimateDiffPipeline.from_pretrained(
        model_id, motion_adapter=adapter, torch_dtype=torch.float16
    )

    # Scheduler settings recommended for AnimateDiff (linear betas,
    # trailing timestep spacing).
    pipe.scheduler = DDIMScheduler(
        clip_sample=False,
        beta_start=0.00085,
        beta_end=0.012,
        beta_schedule="linear",
        timestep_spacing="trailing",
        steps_offset=1,
    )

    # Memory savers: slice VAE decode and offload idle sub-models to CPU.
    pipe.enable_vae_slicing()
    pipe.enable_model_cpu_offload()

    # IP-Adapter lets the uploaded image condition the generation.
    pipe.load_ip_adapter(
        "h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin"
    )
    return pipe


def generate_gif(image, animation_type):
    """Generate an animated GIF from an input image and a motion type.

    Args:
        image: PIL image used as the IP-Adapter conditioning image.
        animation_type: Motion-LoRA suffix, e.g. "zoom-out", "tilt-up",
            "pan-left" (resolves to "guoyww/animatediff-motion-lora-<type>").

    Returns:
        Path (str) of the exported GIF file.
    """
    # Build the pipeline once and reuse it across calls; previously the
    # full model stack was re-downloaded/re-loaded on every request.
    pipe = _PIPELINE_CACHE.get("pipe")
    if pipe is None:
        pipe = _build_pipeline()
        _PIPELINE_CACHE["pipe"] = pipe

    # Drop any motion LoRA from a previous request: re-registering the same
    # adapter_name on a cached pipeline would otherwise raise.
    pipe.unload_lora_weights()
    pipe.load_lora_weights(
        f"guoyww/animatediff-motion-lora-{animation_type}",
        adapter_name=animation_type,
    )
    # 0.75 tones down the motion LoRA slightly for more stable output.
    pipe.set_adapters([animation_type], adapter_weights=[0.75])

    # Fresh seed per call so repeated requests give varied animations.
    seed = random.randint(0, 2**32 - 1)
    prompt = "best quality, high quality, trending on artstation"

    output = pipe(
        prompt=prompt,
        num_frames=16,
        guidance_scale=7.5,
        num_inference_steps=30,
        ip_adapter_image=image,
        generator=torch.Generator("cpu").manual_seed(seed),
    )
    frames = output.frames[0]

    gif_path = "output_animation.gif"
    export_to_gif(frames, gif_path)
    return gif_path
|
|
|
|
|
# Gradio UI: upload an image, pick a motion LoRA, get back a GIF file path.
iface = gr.Interface(
    fn=generate_gif,
    inputs=[
        gr.Image(type="pil"),
        gr.Radio(["zoom-out", "tilt-up", "pan-left"]),
    ],
    # generate_gif returns a filesystem path, so the output component must
    # use type="filepath" (type="pil" would expect a PIL.Image object).
    outputs=gr.Image(type="filepath", label="Generated GIF"),
    title="AnimateDiff + IP Adapter Demo",
    description="Upload an image and select a motion module type to generate a GIF!",
)

# debug=True surfaces server-side errors in the UI; share=True exposes a
# public tunnel URL.
iface.launch(debug=True, share=True)