import gradio as gr
import os
import yaml
import tempfile
import huggingface_hub
import subprocess
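# The base SVD model referenced in the config below is gated, so authenticate
# with the token stored in the Space secrets before downloading anything.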
HF_TKN = os.environ.get("GATED_HF_TOKEN")
huggingface_hub.login(token=HF_TKN)
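# Fetch the DWPose detectors and the MimicMotion checkpoint that inference.py
# expects to find under ./models.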
huggingface_hub.hf_hub_download(
    repo_id='yzd-v/DWPose',
    filename='yolox_l.onnx',
    local_dir='./models/DWPose',
    local_dir_use_symlinks=False,
)

huggingface_hub.hf_hub_download(
    repo_id='yzd-v/DWPose',
    filename='dw-ll_ucoco_384.onnx',
    local_dir='./models/DWPose',
    local_dir_use_symlinks=False,
)

huggingface_hub.hf_hub_download(
    repo_id='ixaac/MimicMotion',
    filename='MimicMotion_1.pth',
    local_dir='./models',
    local_dir_use_symlinks=False,
)
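# Print the downloaded file tree so the Space build logs show whether the
# checkpoints landed where expected.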
def print_directory_contents(path):
    for root, dirs, files in os.walk(path):
        level = root.replace(path, '').count(os.sep)
        indent = ' ' * 4 * level
        print(f"{indent}{os.path.basename(root)}/")
        subindent = ' ' * 4 * (level + 1)
        for f in files:
            print(f"{subindent}{f}")
# Path to the directory you want to print
directory_path = './models'

# Print the directory contents
print_directory_contents(directory_path)
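# Build a per-request MimicMotion config in a temporary directory and run
# inference.py on it as a subprocess.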
def infer(ref_video_in, ref_image_in):
    # Create a temporary directory
    with tempfile.TemporaryDirectory() as temp_dir:
        print("Temporary directory created:", temp_dir)

        # Define the values for the variables
        ref_video_path = ref_video_in
        ref_image_path = ref_image_in
        num_frames = 16
        resolution = 576
        frames_overlap = 6
        num_inference_steps = 25
        noise_aug_strength = 0
        guidance_scale = 2.0
        sample_stride = 2
        fps = 12
        seed = 42
        # Create the data structure
        data = {
            'base_model_path': 'stabilityai/stable-video-diffusion-img2vid-xt-1-1',
            # Must point at the checkpoint downloaded above (MimicMotion_1.pth)
            'ckpt_path': 'models/MimicMotion_1.pth',
            'test_case': [
                {
                    'ref_video_path': ref_video_path,
                    'ref_image_path': ref_image_path,
                    'num_frames': num_frames,
                    'resolution': resolution,
                    'frames_overlap': frames_overlap,
                    'num_inference_steps': num_inference_steps,
                    'noise_aug_strength': noise_aug_strength,
                    'guidance_scale': guidance_scale,
                    'sample_stride': sample_stride,
                    'fps': fps,
                    'seed': seed
                }
            ]
        }
        # Define the file path
        file_path = os.path.join(temp_dir, 'config.yaml')

        # Write the data to a YAML file
        with open(file_path, 'w') as file:
            yaml.dump(data, file, default_flow_style=False)

        print("YAML file 'config.yaml' created successfully in", file_path)
        # Execute the inference command; merge stderr into stdout so the logs
        # stream in order and a full stderr pipe cannot block the subprocess
        command = ['python', 'inference.py', '--inference_config', file_path]
        process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)

        # Print logs in real-time until the process closes its output
        for output in process.stdout:
            print(output.strip())

        # Wait for the process to complete and get the return code
        return_code = process.wait()
        print("Inference script finished with return code:", return_code)
return "done" | |
demo = gr.Interface(
    fn=infer,
    inputs=[gr.Video(), gr.Image(type="filepath")],
    outputs=[gr.Textbox()]
)

demo.launch()