import os

import gradio as gr
import torch
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
MODEL_DIR = "models"
MODEL_FILENAME = "lama.pt"
LOCAL_MODEL = os.path.join(MODEL_DIR, MODEL_FILENAME)
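
# Download the TorchScript checkpoint from the Hugging Face Hub on the first
# run; later runs reuse the local copy in MODEL_DIR.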
if not os.path.exists(LOCAL_MODEL):
    from huggingface_hub import hf_hub_download
    REPO_ID = "JosephCatrambone/big-lama-torchscript"
    LOCAL_MODEL = hf_hub_download(repo_id=REPO_ID, filename=MODEL_FILENAME, local_dir=MODEL_DIR, local_dir_use_symlinks=False)
model = torch.jit.load(LOCAL_MODEL)
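# The module is only used for inference, so eval mode is a safe default
# (assumption: the exported graph contains no train-time-only ops either way).
model.eval()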

def predict(input_img, input_mask):
    # Image shape should be (1, 3, 512, 512) with values in the range 0-1.
    # Mask shape should be (1, 1, 512, 512) AND hold values of exactly 0.0 or 1.0, not in-between.
    image = (pil_to_tensor(input_img.convert('RGB')) / 255.0).unsqueeze(0)
    # Threshold, then cast to float so the mask is a hard 0.0/1.0 float tensor
    # rather than an integer one, matching the shape contract above.
    mask = (pil_to_tensor(input_mask.convert('L')) > 0).float().unsqueeze(0)
    out = model(image, mask)[0]
    return to_pil_image(out)
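
# Sketch (assumption, not part of the original app): LaMa-style models usually
# expect spatial dims divisible by 8, and this Space does not resize uploads.
# If the TorchScript module rejects arbitrary sizes, reflect-padding the image
# and mask tensors before calling the model may help. `pad_to_multiple` is a
# hypothetical helper name.
def pad_to_multiple(t, multiple=8):
    _, _, h, w = t.shape
    pad_h = (multiple - h % multiple) % multiple
    pad_w = (multiple - w % multiple) % multiple
    # F.pad's order for the last two dims is (left, right, top, bottom).
    return torch.nn.functional.pad(t, (0, pad_w, 0, pad_h), mode="reflect")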

gradio_app = gr.Interface(
    predict,
    inputs=[
        gr.Image(label="Select Base Image", sources=['upload'], type="pil"),
        gr.Image(label="Select Image Mask (White will be inpainted)", sources=['upload'], type="pil"),
    ],
    outputs=[gr.Image(label="Inpainted Image")],
    title="LAMA Inpainting",
)
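
# Assumption: inpainting can take several seconds per request; on recent Gradio
# versions, calling gradio_app.queue() before launch() queues requests instead
# of letting them time out.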

if __name__ == "__main__":
    gradio_app.launch()