# ControlVideo / app.py
import os
import subprocess

import gradio as gr
from huggingface_hub import snapshot_download
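
# Model checkpoints fetched at startup: the Stable Diffusion v1.5 base model and the
# depth, canny, and openpose ControlNet variants exposed as conditioning options below.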
model_ids = [
'runwayml/stable-diffusion-v1-5',
'lllyasviel/sd-controlnet-depth',
'lllyasviel/sd-controlnet-canny',
'lllyasviel/sd-controlnet-openpose',
]
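# Download each snapshot into checkpoints/<repo_name> so inference.py can load it from a local path.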
for model_id in model_ids:
    model_name = model_id.split('/')[-1]
    snapshot_download(model_id, local_dir=f'checkpoints/{model_name}')

def run_inference(prompt, video_path, condition, video_length):
    # Build the inference.py command line from the UI inputs
    command = (
        f"python inference.py --prompt '{prompt}' --condition {condition} "
        f"--video_path '{video_path}' --output_path 'outputs/' "
        f"--video_length {int(video_length)} --smoother_steps 19 20"
    )
    output = subprocess.check_output(command, shell=True, text=True)
    output = output.strip()  # Remove any leading/trailing whitespace
    # Process the output as needed
    print("Command output:", output)
    return "done"
    #return f"{output_path}/{prompt}.mp4"
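
# Gradio UI: collect the prompt, source video, conditioning type, and clip length,
# then shell out to inference.py via run_inference on submit.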
with gr.Blocks() as demo:
    with gr.Column():
        prompt = gr.Textbox(label="prompt")
        video_path = gr.Video(source="upload", type="filepath")
        # Conditioning type; matches the downloaded ControlNet variants: depth, canny, or openpose
        condition = gr.Textbox(label="Condition", value="depth")
        video_length = gr.Slider(label="video length", minimum=1, maximum=15, step=1, value=2)
        #seed = gr.Number(label="seed", value=42)
        submit_btn = gr.Button("Submit")
        #video_res = gr.Video(label="result")
        video_res = gr.Textbox(label="result")
        submit_btn.click(fn=run_inference,
                         inputs=[prompt,
                                 video_path,
                                 condition,
                                 video_length
                                 ],
                         outputs=[video_res])
demo.queue(max_size=12).launch()