Spaces: Running on Zero
File size: 4,309 Bytes
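# app.py for the "Running on Zero" Space: segments a target region of an uploaded
# photo (the "face" category by default), re-renders that crop with SDXL img2img on
# a ZeroGPU-allocated GPU, and pastes the edited crop back into the original image.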
import spaces
import gradio as gr
import time
import torch
from PIL import Image
from segment_utils import (
    segment_image,
    restore_result,
)
from diffusers import (
    StableDiffusionXLImg2ImgPipeline,
)

# Base SDXL checkpoint and default prompts for the face-editing task.
BASE_MODEL = "stabilityai/stable-diffusion-xl-base-1.0"
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

DEFAULT_EDIT_PROMPT = "a beautiful woman with a hollywood style face"
DEFAULT_NEGATIVE_PROMPT = "worst quality, normal quality, low quality, low res, blurry, text, watermark, logo, banner, extra digits, cropped, jpeg artifacts, signature, username, error, sketch, duplicate, ugly, monochrome, horror, geometry, mutation, disgusting, poorly drawn face, bad face, fused face, ugly face, worst face, asymmetrical, unrealistic skin texture, bad proportions, out of frame, poorly drawn hands, cloned face, double face"
DEFAULT_CATEGORY = "face"

# Load the SDXL img2img pipeline once at startup (fp16 weights) and move it to
# the target device.
basepipeline = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    BASE_MODEL,
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True,
)
basepipeline = basepipeline.to(DEVICE)


# ZeroGPU allocates a GPU to this function for up to 15 seconds per call.
@spaces.GPU(duration=15)
def image_to_image(
    input_image: Image.Image,
    edit_prompt: str,
    seed: int,
    num_steps: int,
    guidance_scale: float,
):
    run_task_time = 0
    time_cost_str = ''
    run_task_time, time_cost_str = get_time_cost(run_task_time, time_cost_str)
    generator = torch.Generator(device=DEVICE).manual_seed(seed)
    generated_image = basepipeline(
        generator=generator,
        prompt=edit_prompt,
        negative_prompt=DEFAULT_NEGATIVE_PROMPT,
        image=input_image,
        guidance_scale=guidance_scale,
        num_inference_steps=num_steps,
    ).images[0]
    run_task_time, time_cost_str = get_time_cost(run_task_time, time_cost_str)
    return generated_image, time_cost_str


def get_time_cost(run_task_time, time_cost_str):
    # Build up a "start-->N-->M" report of per-step durations in milliseconds.
    now_time = int(time.time() * 1000)
    if run_task_time == 0:
        time_cost_str = 'start'
    else:
        if time_cost_str != '':
            time_cost_str += '-->'
        time_cost_str += f'{now_time - run_task_time}'
    run_task_time = now_time
    return run_task_time, time_cost_str


def create_demo() -> gr.Blocks:
    with gr.Blocks() as demo:
        croper = gr.State()

        with gr.Row():
            with gr.Column():
                edit_prompt = gr.Textbox(lines=1, label="Edit Prompt", value=DEFAULT_EDIT_PROMPT)
                generate_size = gr.Number(label="Generate Size", value=1024)
                category = gr.Textbox(label="Category", value=DEFAULT_CATEGORY, visible=False)
            with gr.Column():
                num_steps = gr.Slider(minimum=1, maximum=100, value=30, step=1, label="Num Steps")
                guidance_scale = gr.Slider(minimum=0, maximum=30, value=5, step=0.5, label="Guidance Scale")
                mask_expansion = gr.Number(label="Mask Expansion", value=50, visible=True)
            with gr.Column():
                mask_dilation = gr.Slider(minimum=0, maximum=10, value=2, step=1, label="Mask Dilation")
                seed = gr.Number(label="Seed", value=8)
                g_btn = gr.Button("Edit Image")

        with gr.Row():
            with gr.Column():
                input_image = gr.Image(label="Input Image", type="pil")
            with gr.Column():
                restored_image = gr.Image(label="Restored Image", type="pil", interactive=False)
            with gr.Column():
                origin_area_image = gr.Image(label="Origin Area Image", type="pil", interactive=False)
                generated_image = gr.Image(label="Generated Image", type="pil", interactive=False)
                generated_cost = gr.Textbox(label="Time cost by step (ms):", visible=True, interactive=False)

        # Event chain: segment the selected region, edit the crop with SDXL
        # img2img, then paste the edited crop back into the original image.
        g_btn.click(
            fn=segment_image,
            inputs=[input_image, category, generate_size, mask_expansion, mask_dilation],
            outputs=[origin_area_image, croper],
        ).success(
            fn=image_to_image,
            inputs=[origin_area_image, edit_prompt, seed, num_steps, guidance_scale],
            outputs=[generated_image, generated_cost],
        ).success(
            fn=restore_result,
            inputs=[croper, category, generated_image],
            outputs=[restored_image],
        )

    return demo
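

# Usage note: no launch call is visible in the lines shown above. A standalone
# Gradio Space would typically end with something along these lines; this is a
# hypothetical sketch, not the file's verbatim ending.
if __name__ == "__main__":
    demo = create_demo()
    demo.launch()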