import os

# hf_transfer must be enabled before huggingface_hub is imported (gradio and spaces import it).
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"

import gradio as gr
import spaces
import torch

# Hack for ZeroGPU: replace torch.jit.script with a no-op pass-through.
torch.jit.script = lambda f: f
base_model="frankjoshua/albedobaseXL_v13"
from omni_zero import OmniZeroCouple

omni_zero = OmniZeroCouple(
    base_model=base_model,
    device="cuda",
)
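
# The pipeline is constructed once at import time; ZeroGPU attaches a GPU only while a
# @spaces.GPU-decorated function such as generate() below is running.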

@spaces.GPU()
def generate(
    # Parameter order must match the `inputs` lists wired to submit.click and gr.Examples below.
    prompt="Cinematic still photo of a couple. emotional, harmonious, vignette, 4k epic detailed, shot on kodak, 35mm photo, sharp focus, high budget, cinemascope, moody, epic, gorgeous, film grain, grainy",
    base_image="https://cdn-prod.styleof.com/inferences/cm1ho5cjl14nh14jec6phg2h8/i6k59e7gpsr45ufc7l8kun0g-medium.jpeg",
    style_image="https://cdn-prod.styleof.com/inferences/cm1ho5cjl14nh14jec6phg2h8/i6k59e7gpsr45ufc7l8kun0g-medium.jpeg",
    identity_image_1="https://cdn-prod.styleof.com/inferences/cm1hp4lea14oz14jeoghnex7g/dlgc5xwo0qzey7qaixy45i1o-medium.jpeg",
    identity_image_2="https://cdn-prod.styleof.com/inferences/cm1ho69ha14np14jesnusqiep/mp3aaktzqz20ujco5i3bi5s1-medium.jpeg",
    seed=42,
    negative_prompt="anime, cartoon, graphic, (blur, blurry, bokeh), text, painting, crayon, graphite, abstract, glitch, deformed, mutated, ugly, disfigured",
    guidance_scale=3.0,
    number_of_images=1,
    number_of_steps=10,
    base_image_strength=0.3,
    style_image_strength=1.0,
    identity_image_strength_1=1.0,
    identity_image_strength_2=1.0,
    depth_image=None,
    depth_image_strength=0.2,
    mask_guidance_start=0.0,
    mask_guidance_end=1.0,
    progress=gr.Progress(track_tqdm=True),
):
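    # Everything below runs on the GPU allocation acquired via the @spaces.GPU decorator.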
    images = omni_zero.generate(
        seed=seed,
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        number_of_images=number_of_images,
        number_of_steps=number_of_steps,
        base_image=base_image,
        base_image_strength=base_image_strength,
        style_image=style_image,
        style_image_strength=style_image_strength,
        identity_image_1=identity_image_1,
        identity_image_strength_1=identity_image_strength_1,
        identity_image_2=identity_image_2,
        identity_image_strength_2=identity_image_strength_2,
        depth_image=depth_image,
        depth_image_strength=depth_image_strength,
        mask_guidance_start=mask_guidance_start,
        mask_guidance_end=mask_guidance_end,
    )

    return images
#Move the components in the example fields outside so they are available when gr.Examples is instantiated
with gr.Blocks() as demo:
    gr.Markdown("# Omni Zero")
    gr.Markdown("A diffusion pipeline for zero-shot stylized portrait creation. [GitHub], [StyleOf Remix Yourself]")
    with gr.Row():
        with gr.Column():
            with gr.Row():
                prompt = gr.Textbox(label="Prompt", value="A person")
            with gr.Row():
                negative_prompt = gr.Textbox(label="Negative Prompt", value="blurry, out of focus")
            with gr.Row():
                with gr.Column(min_width=140):
                    with gr.Row():
                        base_image = gr.Image(label="Composition")
                    with gr.Row():
                        base_image_strength = gr.Slider(label="Strength", step=0.01, minimum=0.0, maximum=1.0, value=0.3)
                with gr.Column(min_width=140):
                    with gr.Row():
                        style_image = gr.Image(label="Style Image")
                    with gr.Row():
                        style_image_strength = gr.Slider(label="Strength", step=0.01, minimum=0.0, maximum=1.0, value=1.0)
                with gr.Column(min_width=140):
                    with gr.Row():
                        identity_image_1 = gr.Image(label="Identity Image 1")
                    with gr.Row():
                        identity_image_strength_1 = gr.Slider(label="Strength", step=0.01, minimum=0.0, maximum=1.0, value=1.0)
                with gr.Column(min_width=140):
                    with gr.Row():
                        identity_image_2 = gr.Image(label="Identity Image 2")
                    with gr.Row():
                        identity_image_strength_2 = gr.Slider(label="Strength", step=0.01, minimum=0.0, maximum=1.0, value=1.0)
            with gr.Accordion("Advanced options", open=False):
                # Depth conditioning is supported by the pipeline but not exposed in the UI yet.
                # with gr.Row():
                #     with gr.Column(min_width=140):
                #         with gr.Row():
                #             depth_image = gr.Image(label="depth_image", value=None)
                #         with gr.Row():
                #             depth_image_strength = gr.Slider(label="depth_image_strength", step=0.01, minimum=0.0, maximum=1.0, value=0.5)
                with gr.Row():
                    seed = gr.Slider(label="Seed", step=1, minimum=0, maximum=10000000, value=42)
                    number_of_images = gr.Slider(label="Number of Outputs", step=1, minimum=1, maximum=4, value=1)
                with gr.Row():
                    guidance_scale = gr.Slider(label="Guidance Scale", step=0.1, minimum=0.0, maximum=14.0, value=3.0)
                    number_of_steps = gr.Slider(label="Number of Steps", step=1, minimum=1, maximum=50, value=10)
        with gr.Column():
            with gr.Row():
                out = gr.Gallery(label="Output(s)")
            with gr.Row():
                # clear = gr.Button("Clear")
                submit = gr.Button("Generate")
            submit.click(
                generate,
                inputs=[
                    prompt,
                    base_image,
                    style_image,
                    identity_image_1,
                    identity_image_2,
                    seed,
                    negative_prompt,
                    guidance_scale,
                    number_of_images,
                    number_of_steps,
                    base_image_strength,
                    style_image_strength,
                    identity_image_strength_1,
                    identity_image_strength_2,
                ],
                outputs=[out],
            )
            # clear.click(lambda: None, None, out, queue=False)
    gr.Examples(
        examples=[["A person", "https://github.com/okaris/omni-zero/assets/1448702/2ca63443-c7f3-4ba6-95c1-2a341414865f", "https://github.com/okaris/omni-zero/assets/1448702/64dc150b-f683-41b1-be23-b6a52c771584", "https://github.com/okaris/omni-zero/assets/1448702/ba193a3a-f90e-4461-848a-560454531c58"]],
        inputs=[prompt, base_image, style_image, identity_image_1],
        outputs=[out],
        fn=generate,
        # "lazy" caching runs generate() for an example only the first time it is selected.
        cache_examples="lazy",
    )

if __name__ == "__main__":
    demo.launch()
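    # For local testing, a shareable public link can be created with demo.launch(share=True);
    # on Hugging Face Spaces the plain launch() above is sufficient.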