#!/usr/bin/env python

from __future__ import annotations

import pathlib

import gradio as gr

from model import Model

DESCRIPTION = """# THIS IS AN EXPERIMENTAL APPLICATION BY TDNM
"""


def load_hairstyle_list() -> list[str]:
    with open("HairCLIP/mapper/hairstyle_list.txt") as f:
        lines = [line.strip() for line in f.readlines()]
    # Each entry ends with the 10-character suffix " hairstyle"; drop it.
    lines = [line[:-10] for line in lines]
    return lines


def set_example_image(example: list) -> gr.Image:
    return gr.Image(value=example[0])


def update_step2_components(choice: str) -> tuple[gr.Dropdown, gr.Textbox]:
    # Show the hairstyle dropdown and/or the color textbox depending on the edit type.
    return (
        gr.Dropdown(visible=choice in ["hairstyle", "both"]),
        gr.Textbox(visible=choice in ["color", "both"]),
    )


model = Model()

with gr.Blocks(css="style.css") as demo:
    gr.Markdown(DESCRIPTION)

    with gr.Group():
        gr.Markdown("## Step 1")
        with gr.Row():
            with gr.Column():
                with gr.Row():
                    input_image = gr.Image(label="Portrait image", type="filepath")
                with gr.Row():
                    preprocess_button = gr.Button("Preprocess")
            with gr.Column():
                aligned_face = gr.Image(label="Aligned Face", type="pil", interactive=False)
            with gr.Column():
                reconstructed_face = gr.Image(label="Reconstructed Face", type="numpy")
                latent = gr.State()
        with gr.Row():
            paths = sorted(pathlib.Path("images").glob("*.jpg"))
            gr.Examples(examples=[[path.as_posix()] for path in paths], inputs=input_image)

    with gr.Group():
        gr.Markdown("## Step 2")
        with gr.Row():
            with gr.Column():
                with gr.Row():
                    # The choice values must match the strings checked in
                    # update_step2_components() and expected by model.generate().
                    editing_type = gr.Radio(
                        label="Edit type",
                        choices=["hairstyle", "color", "both"],
                        value="both",
                        type="value",
                    )
                with gr.Row():
                    hairstyles = load_hairstyle_list()
                    hairstyle_index = gr.Dropdown(
                        label="Hairstyle", choices=hairstyles, value="afro", type="index"
                    )
                with gr.Row():
                    color_description = gr.Textbox(label="Hair color", value="red")
                with gr.Row():
                    run_button = gr.Button("Run")
            with gr.Column():
                result = gr.Image(label="Result")

    # Step 1: detect and align the face, then invert it into the latent space.
    preprocess_button.click(fn=model.detect_and_align_face, inputs=input_image, outputs=aligned_face)
    aligned_face.change(fn=model.reconstruct_face, inputs=aligned_face, outputs=[reconstructed_face, latent])
    # Step 2: toggle the hairstyle/color inputs and run the edit.
    editing_type.change(fn=update_step2_components, inputs=editing_type, outputs=[hairstyle_index, color_description])
    run_button.click(
        fn=model.generate,
        inputs=[
            editing_type,
            hairstyle_index,
            color_description,
            latent,
        ],
        outputs=result,
    )

if __name__ == "__main__":
    demo.queue(max_size=10).launch()