"""Minimal Gradio demo app for an LVDM-based video model.

Loads the model config at import time; the actual model/sampler setup is
currently commented out. Exposes a trivial text-in/text-out `greet`
interface and launches it when run as a script.
"""

# --- stdlib ---
import argparse
import math
import os
import time

# --- third-party ---
import gradio as gr
import numpy as np
import torch
import torch.distributed as dist
import yaml
from omegaconf import OmegaConf
from pytorch_lightning import seed_everything
from tqdm import trange

# --- local ---
from lvdm.samplers.ddim import DDIMSampler
from lvdm.utils.common_utils import str2bool
from lvdm.utils.dist_utils import setup_dist, gather_data
from lvdm.utils.saving_utils import npz_to_video_grid, npz_to_imgsheet_5d
from utils import load_model, get_conditions, make_model_input_shape, torch_to_np

# Config is loaded eagerly at import time; the path is relative to the
# working directory, so the script must be run from the project root.
config_path = "model_config.yaml"
config = OmegaConf.load(config_path)

# NOTE(review): this commented-out setup references an undefined `opt`
# (presumably an argparse.Namespace built from command-line flags) —
# `opt` must be defined before re-enabling this code.
# model, _, _ = load_model(config, opt.ckpt_path,
#                          inject_lora=opt.inject_lora,
#                          lora_scale=opt.lora_scale,
#                          lora_path=opt.lora_path,
#                          )
# ddim_sampler = DDIMSampler(model) if opt.sample_type == "ddim" else None


def greet(name: str) -> str:
    """Return a greeting for *name* (placeholder Gradio handler)."""
    return f"Hello {name}!!"


iface = gr.Interface(fn=greet, inputs="text", outputs="text")

if __name__ == "__main__":
    # Guarded so that importing this module does not start the web server.
    iface.launch()