# Hugging Face Space: LaMa image inpainting demo (Gradio app).
import os

import gradio as gr
import torch
from torchvision.transforms.functional import pil_to_tensor, to_pil_image

# Where the TorchScript LaMa model is cached locally.
MODEL_DIR = "models"
MODEL_FILENAME = "lama.pt"
LOCAL_MODEL = os.path.join(MODEL_DIR, MODEL_FILENAME)

# Download the model from the Hugging Face Hub on first run only.
if not os.path.exists(LOCAL_MODEL):
    from huggingface_hub import hf_hub_download

    REPO_ID = "JosephCatrambone/big-lama-torchscript"
    # `local_dir_use_symlinks` is deprecated and ignored by modern
    # huggingface_hub; `local_dir` alone already yields a real file copy.
    LOCAL_MODEL = hf_hub_download(
        repo_id=REPO_ID, filename=MODEL_FILENAME, local_dir=MODEL_DIR
    )

model = torch.jit.load(LOCAL_MODEL)
model.eval()  # inference only — disable any training-mode behavior
def predict(input_img, input_mask):
    """Inpaint `input_img` wherever `input_mask` is white, using LaMa.

    Args:
        input_img: PIL image to inpaint; converted to RGB, scaled to [0, 1].
        input_mask: PIL image; any non-zero (white) pixel marks a region
            to be inpainted.

    Returns:
        A PIL image with the masked regions filled in.
    """
    # Model contract: image (1, 3, H, W) in [0, 1]; mask (1, 1, H, W) with
    # hard 0.0 / 1.0 values, not in-between.  NOTE(review): the original
    # comment says 512x512 — confirm whether other sizes are supported.
    img = (pil_to_tensor(input_img.convert("RGB")) / 255.0).unsqueeze(0)
    # Binarize the mask and make it float explicitly; the original built an
    # int64 mask (`1 * (x > 0)`) and relied on implicit promotion.
    mask = (pil_to_tensor(input_mask.convert("L")) > 0).float().unsqueeze(0)
    with torch.inference_mode():  # no autograd bookkeeping during inference
        out = model(img, mask)[0]
    # Clamp defensively: to_pil_image expects float values in [0, 1].
    return to_pil_image(out.clamp(0.0, 1.0))
# Wire the inpainting function into a simple two-input Gradio UI:
# a base image plus a black/white mask, producing one output image.
gradio_app = gr.Interface(
    fn=predict,
    inputs=[
        gr.Image(label="Select Base Image", sources=["upload"], type="pil"),
        gr.Image(
            label="Select Image Mask (White will be inpainted)",
            sources=["upload"],
            type="pil",
        ),
    ],
    outputs=[gr.Image(label="Inpainted Image")],
    title="LAMA Inpainting",
)
# Launch the web UI only when executed as a script, not when imported.
if __name__ == "__main__":
    gradio_app.launch()