import gradio as gr
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pathlib import Path
from PIL import Image
from torchvision import transforms
from zipfile import ZipFile
import warnings

warnings.filterwarnings("ignore")
# project imports
from data_loader_cache import normalize, im_reader, im_preprocess
from models import *
# Helpers
device = 'cuda' if torch.cuda.is_available() else 'cpu'
class GOSNormalize(object):
    '''
    Normalize the image using torch.transforms
    '''
    def __init__(self, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
        self.mean = mean
        self.std = std

    def __call__(self, image):
        image = normalize(image, self.mean, self.std)
        return image

transform = transforms.Compose([GOSNormalize([0.5,0.5,0.5],[1.0,1.0,1.0])])
def load_image(im_path, hypar):
    im = im_reader(im_path)
    im, im_shp = im_preprocess(im, hypar["cache_size"])
    im = torch.divide(im, 255.0)
    shape = torch.from_numpy(np.array(im_shp))
    return transform(im).unsqueeze(0), shape.unsqueeze(0)  # make a batch of image, shape
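# Note: load_image() returns a normalized 1 x C x H x W tensor plus a 1 x 2 tensor holding the
# original (height, width); predict() uses the latter to resize the mask back to the input size.
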
def build_model(hypar, device):
    net = hypar["model"]  # GOSNETINC(3,1)

    # convert to half precision, keeping BatchNorm layers in float32 for stability
    if hypar["model_digit"] == "half":
        net.half()
        for layer in net.modules():
            if isinstance(layer, nn.BatchNorm2d):
                layer.float()

    net.to(device)

    # restore pretrained weights if a checkpoint name is given
    if hypar["restore_model"] != "":
        net.load_state_dict(torch.load(hypar["model_path"] + "/" + hypar["restore_model"], map_location=device))
        net.to(device)

    net.eval()
    return net
def predict(net, inputs_val, shapes_val, hypar, device):
    '''
    Given an image tensor, predict the foreground mask
    '''
    net.eval()

    if hypar["model_digit"] == "full":
        inputs_val = inputs_val.type(torch.FloatTensor)
    else:
        inputs_val = inputs_val.type(torch.HalfTensor)

    inputs_val_v = inputs_val.to(device)
    ds_val = net(inputs_val_v)[0]  # list of 6 side outputs
    pred_val = ds_val[0][0, :, :, :]  # B x 1 x H x W; the first output is the most accurate prediction

    # recover the prediction spatial size to the original image size
    pred_val = torch.squeeze(F.interpolate(torch.unsqueeze(pred_val, 0),
                                           (int(shapes_val[0][0]), int(shapes_val[0][1])),
                                           mode='bilinear'))

    # min-max normalize the mask to [0, 1]
    ma = torch.max(pred_val)
    mi = torch.min(pred_val)
    pred_val = (pred_val - mi) / (ma - mi)

    if device == 'cuda':
        torch.cuda.empty_cache()
    return (pred_val.detach().cpu().numpy() * 255).astype(np.uint8)  # the mask we need
# Set Parameters
hypar = {}  # parameters for inference

hypar["model_path"] = "./saved_models"  ## load trained weights from this path
hypar["restore_model"] = "isnet.pth"  ## name of the to-be-loaded weights
hypar["interm_sup"] = False  ## whether to activate intermediate feature supervision

## choose floating point accuracy
hypar["model_digit"] = "full"  ## "half" or "full" float precision
hypar["seed"] = 0

hypar["cache_size"] = [1024, 1024]  ## cached input spatial resolution, can be configured to a different size

## data augmentation parameters
hypar["input_size"] = [1024, 1024]  ## model input spatial size, usually the same as hypar["cache_size"], meaning the images are not resized further
hypar["crop_size"] = [1024, 1024]  ## random crop size from the input, usually smaller than hypar["cache_size"], e.g. [920, 920], for data augmentation

hypar["model"] = ISNetDIS()
# Build Model
net = build_model(hypar, device)
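
# Hypothetical variant (commented out, not used below): half precision roughly halves GPU memory
# and is only worthwhile on a CUDA device. It assumes a fresh model instance, since build_model()
# mutates the model it is given:
#   hypar_fp16 = dict(hypar, model_digit="half", model=ISNetDIS())
#   net_fp16 = build_model(hypar_fp16, device)
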
def inference(image_path):
    image_tensor, orig_size = load_image(image_path, hypar)
    mask = predict(net, image_tensor, orig_size, hypar, device)

    # use the predicted mask as the alpha channel of the original image
    pil_mask = Image.fromarray(mask).convert('L')
    im_rgb = Image.open(image_path).convert("RGB")
    im_rgba = im_rgb.copy()
    im_rgba.putalpha(pil_mask)

    # save the cutout next to the input image
    file_name = Path(image_path).stem + "_nobg.png"
    file_path = Path(Path(image_path).parent, file_name)
    im_rgba.save(file_path)
    return str(file_path.resolve())
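
# Example (hypothetical path): running the pipeline outside of Gradio.
#   cutout_path = inference("samples/portrait.jpg")  # writes samples/portrait_nobg.png
#   print(cutout_path)
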
def bw(image_files):
    print(image_files)
    output = []
    for idx, file in enumerate(image_files):
        print(file.name)
        img = Image.open(file.name)
        img = img.convert("L")
        output.append(img)
    print(output)
    return output
def bw_single(image_file):
    img = Image.open(image_file)
    img = img.convert("L")
    return img
def batch(image_files):
    output = []
    for idx, file in enumerate(image_files):
        file = inference(file.name)
        output.append(file)

    # bundle all cutouts into a single zip for download
    with ZipFile("tmp.zip", "w") as zipObj:
        for idx, file in enumerate(output):
            zipObj.write(file, file.split("/")[-1])
    return output, "tmp.zip"
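
# Note: batch() returns two values that map onto the two Gradio outputs wired up below:
# the list of cutout paths fills the Gallery, and "tmp.zip" feeds the Files download.
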
with gr.Blocks() as iface:
    gr.Markdown("# Removedor de Fundo💚")
    gr.HTML("Usa <a href='https://github.com/xuebinqin/DIS'>DIS</a> para remover o plano de fundo")

    with gr.Tab("Uma Imagem"):
        with gr.Row():
            with gr.Column():
                image = gr.Image(type='filepath')
            with gr.Column():
                image_output = gr.Image(interactive=False)
        with gr.Row():
            with gr.Column():
                single_removebg = gr.Button("Remover Fundo")
            with gr.Column():
                single_clear = gr.Button("Limpar")

    with gr.Tab("Lote de Imagens"):
        with gr.Row():
            with gr.Column():
                images = gr.File(file_count="multiple", file_types=["image"])
            with gr.Column():
                gallery = gr.Gallery()
                file_list = gr.Files(interactive=False)
        with gr.Row():
            with gr.Column():
                batch_removebg = gr.Button("Processar Lote")
            with gr.Column():
                batch_clear = gr.Button("Limpar")

    with gr.Tab("Sobre"):
        with gr.Row():
            gr.HTML("Estamos testando esse código usando o <a href='https://github.com/xuebinqin/DIS'>DIS</a> para remover o plano de fundo. Envie sua imagem para que o nosso removedor faça sua parte.")

    # Events
    single_removebg.click(inference, inputs=image, outputs=image_output)
    batch_removebg.click(batch, inputs=images, outputs=[gallery, file_list])
    single_clear.click(lambda: None, None, image, queue=False)
    batch_clear.click(lambda: None, None, images, queue=False)
iface.launch()