import random
import gradio as gr
import imageio
import numpy as np
import onnx
import onnxruntime as rt
import huggingface_hub
from numpy.random import RandomState
from skimage import transform
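# Box helpers: boxes are (x1, y1, x2, y2) in pixels. get_inter returns the
# intersection area of two boxes and iou their intersection-over-union.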
def get_inter(r1, r2):
    h_inter = max(min(r1[3], r2[3]) - max(r1[1], r2[1]), 0)
    w_inter = max(min(r1[2], r2[2]) - max(r1[0], r2[0]), 0)
    return h_inter * w_inter


def iou(r1, r2):
    s1 = (r1[2] - r1[0]) * (r1[3] - r1[1])
    s2 = (r2[2] - r2[0]) * (r2[3] - r2[1])
    i = get_inter(r1, r2)
    return i / (s1 + s2 - i)

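# letterbox performs YOLO-style preprocessing: scale the image to fit within
# new_shape while keeping its aspect ratio, then pad height and width up to
# multiples of stride with the given fill color.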
def letterbox(im, new_shape=(640, 640), color=(0.5, 0.5, 0.5), stride=32):
    # Resize and pad image while meeting stride-multiple constraints
    shape = im.shape[:2]  # current shape [height, width]
    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    # Compute padding
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
    dw /= 2  # divide padding into 2 sides
    dh /= 2
    if shape[::-1] != new_unpad:  # resize (compare in (width, height) order)
        im = transform.resize(im, (new_unpad[1], new_unpad[0]))
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    im_new = np.full((new_unpad[1] + top + bottom, new_unpad[0] + left + right, 3), color, dtype=np.float32)
    im_new[top:new_unpad[1] + top, left:new_unpad[0] + left] = im
    return im_new

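# nms performs class-wise non-maximum suppression on the raw detector output.
# Each row of pred is (cx, cy, w, h, objectness, class scores...); boxes are
# converted to corner form and kept greedily in order of descending score.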
def nms(pred, conf_thres, iou_thres, max_instance=20):  # pred (anchor_num, 5 + cls_num)
    nc = pred.shape[1] - 5
    candidates = [list() for x in range(nc)]
    for x in pred:
        if x[4] < conf_thres:
            continue
        cls = np.argmax(x[5:])
        p = x[4] * x[5 + cls]
        if conf_thres <= p:
            box = (x[0] - x[2] / 2, x[1] - x[3] / 2, x[0] + x[2] / 2, x[1] + x[3] / 2)  # xywh2xyxy
            candidates[cls].append([p, box])
    result = [list() for x in range(nc)]
    for i, candidate in enumerate(candidates):
        candidate = sorted(candidate, key=lambda a: a[0], reverse=True)
        candidate = candidate[:max_instance]
        for x in candidate:
            ok = True
            for r in result[i]:
                if iou(r[1], x[1]) > iou_thres:
                    ok = False
                    break
            if ok:
                result[i].append(x)
    return result

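# Model wraps all ONNX Runtime sessions: the StyleGAN mapping and synthesis
# networks, the image-to-latent encoder, the character detector, and the
# anime background-segmentation model (skytnt/anime-seg). w_avg is the mean
# latent read from the mapping network, used to re-center encoder outputs.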
class Model:
    def __init__(self):
        self.detector = None
        self.encoder = None
        self.g_synthesis = None
        self.g_mapping = None
        self.detector_stride = None
        self.detector_imgsz = None
        self.detector_class_names = None
        self.anime_seg = None
        self.w_avg = None
        self.load_models()

    def load_models(self):
        g_mapping_path = huggingface_hub.hf_hub_download("skytnt/fbanime-gan", "g_mapping.onnx")
        g_synthesis_path = huggingface_hub.hf_hub_download("skytnt/fbanime-gan", "g_synthesis.onnx")
        encoder_path = huggingface_hub.hf_hub_download("skytnt/fbanime-gan", "encoder.onnx")
        detector_path = huggingface_hub.hf_hub_download("skytnt/fbanime-gan", "waifu_dect.onnx")
        anime_seg_path = huggingface_hub.hf_hub_download("skytnt/anime-seg", "isnetis.onnx")
        providers = ['CPUExecutionProvider']
        gpu_providers = ['CUDAExecutionProvider']
        g_mapping = onnx.load(g_mapping_path)
        w_avg = [x for x in g_mapping.graph.initializer if x.name == "w_avg"][0]
        w_avg = np.frombuffer(w_avg.raw_data, dtype=np.float32)[np.newaxis, :]
        w_avg = w_avg.repeat(16, axis=0)[np.newaxis, :]
        self.w_avg = w_avg
        self.g_mapping = rt.InferenceSession(g_mapping_path, providers=gpu_providers + providers)
        self.g_synthesis = rt.InferenceSession(g_synthesis_path, providers=gpu_providers + providers)
        self.encoder = rt.InferenceSession(encoder_path, providers=providers)
        self.detector = rt.InferenceSession(detector_path, providers=providers)
        detector_meta = self.detector.get_modelmeta().custom_metadata_map
        self.detector_stride = int(detector_meta['stride'])
        self.detector_imgsz = 1088
        self.detector_class_names = eval(detector_meta['names'])
        self.anime_seg = rt.InferenceSession(anime_seg_path, providers=providers)

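    # get_img runs the synthesis network on latent w and converts the NCHW
    # [-1, 1] float output into a uint8 HWC image; get_w maps a z latent to w
    # using the two truncation psi values.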
    def get_img(self, w, noise=0):
        img = self.g_synthesis.run(None, {'w': w, "noise": np.asarray([noise], dtype=np.float32)})[0]
        return (img.transpose(0, 2, 3, 1) * 127.5 + 128).clip(0, 255).astype(np.uint8)[0]

    def get_w(self, z, psi1, psi2):
        return self.g_mapping.run(None, {'z': z, 'psi': np.asarray([psi1, psi2], dtype=np.float32)})[0]

    def remove_bg(self, img, s=1024):
        img0 = img
        img = (img / 255).astype(np.float32)
        h, w = h0, w0 = img.shape[:-1]
        h, w = (s, int(s * w / h)) if h > w else (int(s * h / w), s)
        ph, pw = s - h, s - w
        img_input = np.zeros([s, s, 3], dtype=np.float32)
        img_input[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w] = transform.resize(img, (h, w))
        img_input = np.transpose(img_input, (2, 0, 1))
        img_input = img_input[np.newaxis, :]
        mask = self.anime_seg.run(None, {'img': img_input})[0][0]
        mask = np.transpose(mask, (1, 2, 0))
        mask = mask[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w]
        mask = transform.resize(mask, (h0, w0))
        img0 = (img0 * mask + 255 * (1 - mask)).astype(np.uint8)
        return img0

    def encode_img(self, img):
        img = transform.resize(((img / 255 - 0.5) / 0.5), (256, 256)).transpose(2, 0, 1)[np.newaxis, :].astype(
            np.float32)
        return self.encoder.run(None, {'img': img})[0] + self.w_avg

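    # detect crops full-body characters: it runs the detector on a letterboxed
    # copy of im0, keeps "waifu" boxes that contain exactly one head and one
    # body box, pads the region slightly, and resizes each crop to
    # 512x1024 (width x height).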
    def detect(self, im0, conf_thres, iou_thres, detail=False):
        if im0 is None:
            return []
        img = letterbox((im0 / 255).astype(np.float32), (self.detector_imgsz, self.detector_imgsz),
                        stride=self.detector_stride)
        # Convert HWC to NCHW
        img = img.transpose(2, 0, 1)
        img = img[np.newaxis, :]
        pred = self.detector.run(None, {'images': img})[0][0]
        dets = nms(pred, conf_thres, iou_thres)
        imgs = []
        # Print results
        s = '%gx%g ' % img.shape[2:]  # print string
        for i, det in enumerate(dets):
            n = len(det)
            s += f"{n} {self.detector_class_names[i]}{'s' * (n > 1)}, "  # add to string
        if detail:
            print(s)
        waifu_rects = []
        head_rects = []
        body_rects = []
        for i, det in enumerate(dets):
            for x in det:
                # Rescale boxes from img_size to im0 size
                wr = im0.shape[1] / img.shape[3]
                hr = im0.shape[0] / img.shape[2]
                x[1] = (int(x[1][0] * wr), int(x[1][1] * hr),
                        int(x[1][2] * wr), int(x[1][3] * hr))
                if i == 0:
                    head_rects.append(x[1])
                elif i == 1:
                    body_rects.append(x[1])
                elif i == 2:
                    waifu_rects.append(x[1])
        for j, waifu_rect in enumerate(waifu_rects):
            msg = f'waifu {j + 1} '
            head_num = 0
            body_num = 0
            hr, br = None, None
            for r in head_rects:
                if get_inter(r, waifu_rect) / ((r[2] - r[0]) * (r[3] - r[1])) > 0.75:
                    hr = r
                    head_num += 1
            if head_num != 1:
                if detail:
                    print(msg + f'head num error: {head_num}')
                continue
            for r in body_rects:
                if get_inter(r, waifu_rect) / ((r[2] - r[0]) * (r[3] - r[1])) > 0.65:
                    br = r
                    body_num += 1
            if body_num != 1:
                if detail:
                    print(msg + f'body num error: {body_num}')
                continue
            bounds = (min(waifu_rect[0], hr[0], br[0]),
                      min(waifu_rect[1], hr[1], br[1]),
                      max(waifu_rect[2], hr[2], br[2]),
                      max(waifu_rect[3], hr[3], br[3]))
            if (bounds[2] - bounds[0]) / (bounds[3] - bounds[1]) > 0.7:
                if detail:
                    print(msg + "ratio out of limit")
                continue
            expand_pixel = (bounds[3] - bounds[1]) // 20
            bounds = [max(bounds[0] - expand_pixel // 2, 0),
                      max(bounds[1] - expand_pixel, 0),
                      min(bounds[2] + expand_pixel // 2, im0.shape[1]),
                      min(bounds[3] + expand_pixel, im0.shape[0]),
                      ]
            # crop and resize
            w = bounds[2] - bounds[0]
            h = bounds[3] - bounds[1]
            bounds[3] += h % 2
            h += h % 2
            r = min(512 / w, 1024 / h)
            pw, ph = int(512 / r - w), int(1024 / r - h)
            bounds_tmp = (bounds[0] - pw // 2, bounds[1] - ph // 2,
                          bounds[2] + pw // 2 + pw % 2, bounds[3] + ph // 2 + ph % 2)
            bounds = (max(0, bounds_tmp[0]), max(0, bounds_tmp[1]),
                      min(im0.shape[1], bounds_tmp[2]), min(im0.shape[0], bounds_tmp[3]))
            dl = bounds[0] - bounds_tmp[0]
            dr = bounds[2] - bounds_tmp[2]
            dt = bounds[1] - bounds_tmp[1]
            db = bounds[3] - bounds_tmp[3]
            w = bounds_tmp[2] - bounds_tmp[0]
            h = bounds_tmp[3] - bounds_tmp[1]
            temp_img = np.full((h, w, 3), 255, dtype=np.uint8)
            temp_img[dt:h + db, dl:w + dr] = im0[bounds[1]:bounds[3], bounds[0]:bounds[2]]
            temp_img = transform.resize(temp_img, (1024, 512), preserve_range=True).astype(np.uint8)
            imgs.append(temp_img)
        return imgs

    # video 1-2 style
    def gen_video(self, w1, w2, noise, path, frame_num=10):
        video = imageio.get_writer(path, mode='I', fps=frame_num // 2, codec='libx264', bitrate='16M')
        lin = np.linspace(0, 1, frame_num)
        for i in range(0, frame_num):
            img = self.get_img(((1 - lin[i]) * w1) + (lin[i] * w2), noise)
            video.append_data(img)
        video.close()

    # video 1-2-1 style
    def gen_video2(self, w1, w2, noise, path, frame_num=10):
        video = imageio.get_writer(path, mode='I', fps=frame_num // 2, codec='libx264', bitrate='16M')
        lin = np.linspace(0, 1, frame_num)
        for i in range(0, frame_num):
            img = self.get_img(((1 - lin[i]) * w1) + (lin[i] * w2), noise)
            video.append_data(img)
        for i in reversed(range(0, frame_num)):
            img = self.get_img(((1 - lin[i]) * w1) + (lin[i] * w2), noise)
            video.append_data(img)
        video.close()

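# Gradio callbacks. These use the global `model` created in the __main__ block
# below; get_thumbnail centers a 128x256 resize of the image on a gray
# 384x256 canvas.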
def get_thumbnail(img):
    img_new = np.full((256, 384, 3), 200, dtype=np.uint8)
    img_new[:, 128:256] = transform.resize(img, (256, 128), preserve_range=True)
    return img_new


def gen_fn(seed, random_seed, psi1, psi2, noise):
    if random_seed:
        seed = random.randint(0, 2 ** 32 - 1)
    z = RandomState(int(seed)).randn(1, 1024)
    w = model.get_w(z.astype(dtype=np.float32), psi1, psi2)
    img_out = model.get_img(w, noise)
    return img_out, seed, w, get_thumbnail(img_out)


def encode_img_fn(img, noise):
    if img is None:
        return "please upload an image", None, None, None, None
    img = model.remove_bg(img)
    imgs = model.detect(img, 0.2, 0.03)
    if len(imgs) == 0:
        return "failed to detect an anime character", None, None, None, None
    w = model.encode_img(imgs[0])
    img_out = model.get_img(w, noise)
    return "success", imgs[0], img_out, w, get_thumbnail(img_out)


def gen_video_fn(w1, w2, noise, frame):
    if w1 is None or w2 is None:
        return None
    # model.gen_video(w1, w2, noise, "video.mp4", int(frame))
    model.gen_video2(w1, w2, noise, "video.mp4", int(frame))
    return "video.mp4"

if __name__ == '__main__':
    model = Model()
    app = gr.Blocks()
    with app:
        gr.Markdown("# full-body anime GAN\n\n"
                    "![visitor badge](https://visitor-badge.glitch.me/badge?page_id=o_ob.hf.full-body-anime-gan)\n"
                    "forked from [skytnt](https://huggingface.co/spaces/skytnt/full-body-anime-gan)\n\n"
                    "Image generation and blending using StyleGAN3 (not text2image, not Stable Diffusion)\n"
                    "psi1 and psi2 are mapping parameters from skytnt/fbanime-gan. psi2 seems to affect clothing, while psi1 seems to affect body styling such as breast emphasis ([my experiment results](https://twitter.com/o_ob/status/1607860668543401984)).\n"
                    "Video generation produces an mp4 following the pattern 1→2→1 for easy comparison.\n\n"
                    "- StyleGAN3を使った画像生成とブレンドです(text2image, Stable Diffusionではありません)\n"
                    "- psi1,2は[skytnt/fbanime-gan](https://github.com/SkyTNT/fbanimegan/tree/main/stylegan3)のmappingパラメータです。\n"
                    "- psi2は服に影響があり、psi1は胸の強調など性癖っぽいスタイルに影響があるようです([実験結果](https://twitter.com/o_ob/status/1607860668543401984))\n"
                    "- 動画生成は比較しやすいように 1→2→1 というパターンでmp4を生成します。\n")
        with gr.Tabs():
            with gr.TabItem("generate image 新規画像生成"):
                with gr.Row():
                    with gr.Column():
                        gr.Markdown("generate image")
                        with gr.Row():
                            gen_input1 = gr.Slider(minimum=0, maximum=2 ** 32 - 1, step=1, value=0, label="seed")
                            gen_input2 = gr.Checkbox(label="Random", value=True)
                        gen_input3 = gr.Slider(minimum=0, maximum=1, step=0.01, value=0.7, label="truncation psi 1")
                        gen_input4 = gr.Slider(minimum=0, maximum=1, step=0.01, value=1, label="truncation psi 2")
                        gen_input5 = gr.Slider(minimum=0, maximum=1, step=0.01, value=1, label="noise strength")
                        with gr.Group():
                            gen_submit = gr.Button("Generate", variant="primary")
                    with gr.Column():
                        gen_output1 = gr.Image(label="output image")
                        select_img_input_w1 = gr.Variable()
                        select_img_input_img1 = gr.Variable()
            with gr.TabItem("encode image 画像からエンコード"):
                with gr.Row():
                    with gr.Column():
                        gr.Markdown("please upload a standing full-body image 完全な立ち絵の画像をアップロードしてください")
                        encode_img_input = gr.Image(label="input image")
                        examples_data = [[f"examples/{x:02d}.jpg"] for x in range(1, 7)]
                        encode_img_examples = gr.Dataset(components=[encode_img_input], samples=examples_data)
                        with gr.Group():
                            encode_img_submit = gr.Button("Run", variant="primary")
                    with gr.Column():
                        encode_img_output1 = gr.Textbox(label="output message")
                        with gr.Row():
                            encode_img_output2 = gr.Image(label="detected")
                            encode_img_output3 = gr.Image(label="encoded")
                        select_img_input_w2 = gr.Variable()
                        select_img_input_img2 = gr.Variable()
            with gr.TabItem("generate video ビデオ合成"):
                with gr.Row():
                    with gr.Column():
                        gr.Markdown("generate video between 2 images 2つの画像からビデオを生成します")
                        with gr.Row():
                            with gr.Column():
                                # the default value must match one of the choices
                                select_img1_dropdown = gr.Radio(label="Select image 1", value="current generated image",
                                                                choices=["current generated image",
                                                                         "current encoded image"], type="index")
                                with gr.Group():
                                    select_img1_button = gr.Button("Select", variant="primary")
                                select_img1_output_img = gr.Image(label="selected image 1")
                                select_img1_output_w = gr.Variable()
                            with gr.Column():
                                select_img2_dropdown = gr.Radio(label="Select image 2", value="current generated image",
                                                                choices=["current generated image",
                                                                         "current encoded image"], type="index")
                                with gr.Group():
                                    select_img2_button = gr.Button("Select", variant="primary")
                                select_img2_output_img = gr.Image(label="selected image 2")
                                select_img2_output_w = gr.Variable()
                        generate_video_frame = gr.Slider(minimum=10, maximum=30, step=1, label="frame", value=15)
                        with gr.Group():
                            generate_video_button = gr.Button("Generate", variant="primary")
                    with gr.Column():
                        generate_video_output = gr.Video(label="output video")
        gen_submit.click(gen_fn, [gen_input1, gen_input2, gen_input3, gen_input4, gen_input5],
                         [gen_output1, gen_input1, select_img_input_w1, select_img_input_img1])
        encode_img_submit.click(encode_img_fn, [encode_img_input, gen_input5],
                                [encode_img_output1, encode_img_output2, encode_img_output3, select_img_input_w2,
                                 select_img_input_img2])
        encode_img_examples.click(lambda x: x[0], [encode_img_examples], [encode_img_input])
        select_img1_button.click(lambda i, img1, img2, w1, w2: (img1, w1) if i == 0 else (img2, w2),
                                 [select_img1_dropdown, select_img_input_img1, select_img_input_img2,
                                  select_img_input_w1, select_img_input_w2],
                                 [select_img1_output_img, select_img1_output_w])
        select_img2_button.click(lambda i, img1, img2, w1, w2: (img1, w1) if i == 0 else (img2, w2),
                                 [select_img2_dropdown, select_img_input_img1, select_img_input_img2,
                                  select_img_input_w1, select_img_input_w2],
                                 [select_img2_output_img, select_img2_output_w])
        generate_video_button.click(gen_video_fn,
                                    [select_img1_output_w, select_img2_output_w, gen_input5, generate_video_frame],
                                    [generate_video_output])
    app.launch()