# AnimeGANv2 / app.py
# Ahsen Khaliq
# Update app.py
# 442b118
# raw
# history blame
# 1.57 kB
# (The lines above are Hugging Face file-viewer page residue captured by the
# scrape — title, author, commit, and UI links — commented out so that this
# module remains importable as Python.)
# --- Environment setup: fetch model code + weights, build the generator. ---
# NOTE(review): these os.system calls hit the network at import time (git
# clone + Google Drive downloads) — expected on a Hugging Face Space, where
# the script runs once per container start.
import os
# Clone the AnimeGANv2 PyTorch port so its `model` module can be imported below.
os.system("git clone https://github.com/bryandlee/animegan2-pytorch")
# Download pretrained checkpoints from Google Drive via gdown.
# Presumably the first is face_paint_512_v2_0.pt (loaded below); the second
# checkpoint is fetched but never referenced in this file — TODO confirm.
os.system("gdown https://drive.google.com/uc?id=1WK5Mdt6mwlcsqCZMHkCUSDJxN1UyFi0-")
os.system("gdown https://drive.google.com/uc?id=18H3iK09_d54qEDoWIc82SyWB2xun4gjU")
#os.system("pip install dlib")
import sys
# Make the freshly-cloned repo importable (provides the `model` module).
sys.path.append("animegan2-pytorch")
import torch
# Inference-only app: disable autograd globally to save memory/compute.
torch.set_grad_enabled(False)
from model import Generator
device = "cpu"
# Build the generator in eval mode and load the face-paint v2 weights.
model = Generator().eval().to(device)
model.load_state_dict(torch.load("face_paint_512_v2_0.pt"))
from PIL import Image
from torchvision.transforms.functional import to_tensor, to_pil_image
import gradio as gr
def face2paint(
    img: Image.Image,
    size: int,
    side_by_side: bool = False,
) -> Image.Image:
    """Center-crop *img* to a square, resize to ``size`` x ``size``, and run it
    through the module-level AnimeGANv2 ``model``.

    The input is scaled to [-1, 1] before the forward pass and the output is
    mapped back to [0, 1]. When ``side_by_side`` is true, the original and
    stylized images are concatenated horizontally in the returned image.
    """
    width, height = img.size
    square = min(width, height)
    # Symmetric center crop to the largest inscribed square.
    left = (width - square) // 2
    top = (height - square) // 2
    img = img.crop((left, top, (width + square) // 2, (height + square) // 2))
    img = img.resize((size, size), Image.LANCZOS)

    # [0, 1] tensor -> [-1, 1], with a leading batch dimension.
    batch = to_tensor(img).unsqueeze(0) * 2 - 1
    stylized = model(batch.to(device)).cpu()[0]

    if side_by_side:
        # Concatenate source and result along the width axis (dim=2 of CHW).
        stylized = torch.cat([batch[0], stylized], dim=2)

    # Map back from [-1, 1] to [0, 1] and clamp before PIL conversion.
    return to_pil_image((stylized * 0.5 + 0.5).clip(0, 1))
import os
#import dlib
import collections
from typing import Union, List
import numpy as np
from PIL import Image
import PIL.Image
import PIL.ImageFile
import numpy as np
import scipy.ndimage
import requests
def inference(image):
    """Gradio entry point: stylize a PIL image at 512x512 via face2paint."""
    return face2paint(image, 512)
# Wire the model into a Gradio UI: one PIL image in, one PIL image out.
# NOTE(review): gr.inputs / gr.outputs is the legacy Gradio (pre-3.x) API;
# newer releases use gr.Image(...) directly — confirm the pinned version.
iface = gr.Interface(inference, gr.inputs.Image(type="pil"), gr.outputs.Image(type="pil"))
iface.launch()