import os
import cv2
import gradio as gr
import torch
from basicsr.archs.srvgg_arch import SRVGGNetCompact
from gfpgan.utils import GFPGANer
from huggingface_hub import snapshot_download, hf_hub_download
from realesrgan.utils import RealESRGANer
import examples
REALESRGAN_REPO_ID = 'leonelhs/realesrgan'
GFPGAN_REPO_ID = 'leonelhs/gfpgan'
os.system("pip freeze")  # log the installed packages for debugging
examples.download()
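# `examples` is a small local helper module; download() presumably fetches the sample
# images ('AI-generate.jpg', 'lincoln.jpg', ...) referenced in the examples list at the bottom.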
# background enhancer with RealESRGAN
model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu')
model_path = hf_hub_download(repo_id=REALESRGAN_REPO_ID, filename='realesr-general-x4v3.pth')
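# realesr-general-x4v3 is a lightweight general-purpose 4x upscaler; tile=0 below feeds the
# whole image through in one pass (a tile size such as 400 could be used to cut GPU memory).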
half = torch.cuda.is_available()  # run the upsampler in fp16 only when a GPU is available
upsampler = RealESRGANer(scale=4, model_path=model_path, model=model, tile=0, tile_pad=10, pre_pad=0, half=half)
os.makedirs('output', exist_ok=True)
def predict(img, version, scale):
    print(img, version, scale)
    if scale > 4:
        scale = 4  # cap the rescaling factor to avoid excessively large outputs
    try:
        extension = os.path.splitext(os.path.basename(str(img)))[1]
        img = cv2.imread(img, cv2.IMREAD_UNCHANGED)
        if len(img.shape) == 3 and img.shape[2] == 4:
            img_mode = 'RGBA'
        elif len(img.shape) == 2:  # for gray inputs
            img_mode = None
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        else:
            img_mode = None

        h, w = img.shape[0:2]
        if h > 3500 or w > 3500:
            print('image is too large:', (h, w))
            return None, None

        if h < 300:
            # pre-upscale very small inputs before restoration
            img = cv2.resize(img, (w * 2, h * 2), interpolation=cv2.INTER_LANCZOS4)
        # download the GFPGAN weights and build the face enhancer for the requested version
        snapshot_folder = snapshot_download(repo_id=GFPGAN_REPO_ID)
        model_specs = {
            'v1.2': ('GFPGANv1.2.pth', 'clean'),
            'v1.3': ('GFPGANv1.3.pth', 'clean'),
            'v1.4': ('GFPGANv1.4.pth', 'clean'),
            'RestoreFormer': ('RestoreFormer.pth', 'RestoreFormer'),
        }
        filename, arch = model_specs[version]
        face_enhancer = GFPGANer(
            model_path=os.path.join(snapshot_folder, filename), upscale=2, arch=arch,
            channel_multiplier=2, bg_upsampler=upsampler)
        try:
            _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True)
        except RuntimeError as error:
            print('Error', error)
            return None, None

        # GFPGANer was built with upscale=2, so the restored output is already 2x the input;
        # for any other factor, resize it to scale times the input dimensions
        try:
            if scale != 2:
                interpolation = cv2.INTER_AREA if scale < 2 else cv2.INTER_LANCZOS4
                h, w = img.shape[0:2]
                output = cv2.resize(output, (int(w * scale), int(h * scale)), interpolation=interpolation)
        except Exception as error:
            print('wrong scale input.', error)
        if img_mode == 'RGBA':  # RGBA images should be saved in png format
            extension = 'png'
        else:
            extension = 'jpg'
        save_path = f'output/out.{extension}'
        cv2.imwrite(save_path, output)
        # OpenCV returns BGR; convert to RGB for display in the Gradio Image output
        output = cv2.cvtColor(output, cv2.COLOR_BGR2RGB)
        return output, save_path
    except Exception as error:
        print('global exception', error)
        return None, None
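# A minimal sketch of calling predict() directly (outside the Gradio UI); the file name
# 'my_photo.jpg' is a placeholder for any local image:
#   restored_rgb, saved_path = predict('my_photo.jpg', 'v1.4', 2)
#   # restored_rgb is an RGB numpy array; saved_path points to 'output/out.jpg' (or .png for RGBA input)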
title = "GFPGAN: Practical Face Restoration Algorithm"
description = r"""Gradio demo for <a href='https://github.com/TencentARC/GFPGAN' target='_blank'><b>GFPGAN: Towards Real-World Blind Face Restoration with Generative Facial Prior</b></a>.<br>
It can be used to restore your **old photos** or improve **AI-generated faces**.<br>
To use it, simply upload your image.<br>
If GFPGAN is helpful, please help to ⭐ the <a href='https://github.com/TencentARC/GFPGAN' target='_blank'>Github Repo</a> and recommend it to your friends 😊
"""
article = r"""
[![download](https://img.shields.io/github/downloads/TencentARC/GFPGAN/total.svg)](https://github.com/TencentARC/GFPGAN/releases)
[![GitHub Stars](https://img.shields.io/github/stars/TencentARC/GFPGAN?style=social)](https://github.com/TencentARC/GFPGAN)
[![arXiv](https://img.shields.io/badge/arXiv-Paper-<COLOR>.svg)](https://arxiv.org/abs/2101.04061)
If you have any question, please email 📧 `[email protected]` or `[email protected]`.
<center><img src='https://visitor-badge.glitch.me/badge?page_id=akhaliq_GFPGAN' alt='visitor badge'></center>
<center><img src='https://visitor-badge.glitch.me/badge?page_id=Gradio_Xintao_GFPGAN' alt='visitor badge'></center>
"""
demo = gr.Interface(
    predict, [
        gr.Image(type="filepath", label="Input"),
        gr.Radio(['v1.2', 'v1.3', 'v1.4', 'RestoreFormer'], type="value", value='v1.4', label='version'),
        gr.Number(label="Rescaling factor", value=2),
    ], [
        gr.Image(type="numpy", label="Output (The whole image)"),
        gr.File(label="Download the output image")
    ],
    title=title,
    description=description,
    article=article,
    examples=[['AI-generate.jpg', 'v1.4', 2],
              ['lincoln.jpg', 'v1.4', 2],
              ['Blake_Lively.jpg', 'v1.4', 2],
              ['10045.png', 'v1.4', 2]])
demo.queue().launch()
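# queue() enables Gradio's request queue so concurrent users are handled in order;
# launch() then serves the app locally (by default on http://127.0.0.1:7860).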