import subprocess
from PIL import Image
import gradio as gr
import os
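
# Note on assumptions: this demo shells out to the Pix2PixZero scripts (src/inversion.py,
# src/edit_real.py, src/edit_synthetic.py), so it is expected to be launched from the repo
# root with those scripts present; all intermediate results are written under output/.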
# First, run the inversion command to obtain the input noise that will reconstruct the image.
# It saves the inversion as output/test_cat/inversion/image-name.pt
# The BLIP-generated caption prompt is saved as output/test_cat/prompt/image-name.txt - e.g., "a painting of a cat sitting on top of a ball"
def inversion(image_in): #, progress=gr.Progress(track_tqdm=True)):
    #progress(0, desc="Starting...")
    # Save the uploaded image to disk so the inversion script can read it
    image_in.save("input_image.png") #("assets/test_images/cats/input_image.png")
    # Run the inversion script; this blocks until the script finishes
    subprocess.run(["python", "src/inversion.py", "--input_image", "input_image.png", "--results_folder", "output/test_cat"])
    # Read the BLIP caption that the inversion script wrote to disk
    with open("output/test_cat/prompt/input_image.txt", "r") as file:
        prompt = file.read()
    return "output/test_cat/inversion/input_image.pt", prompt
# Perform image editing with the editing directions.
# This saves the edited image as output/test_cat/edit/image-name.png
def image_edit(task_name): #, progress=gr.Progress(track_tqdm=True)):
    #progress(0, desc="Starting...")
    # Run the editing script on the saved inversion and caption
    subprocess.run(["python", "src/edit_real.py", "--inversion", "output/test_cat/inversion/input_image.pt",
                    "--prompt", "output/test_cat/prompt/input_image.txt", "--task_name", task_name,
                    "--results_folder", "output/test_cat/"])
    return "output/test_cat/edit/input_image.png"
# Similarly, we can edit synthetic images generated by Stable Diffusion with the following command.
def synthetic_image_edit(prompt, task_name): #, progress=gr.Progress(track_tqdm=True)):
    #progress(0, desc="Starting...")
    # Generate an image from the prompt and translate it in one pass
    subprocess.run(["python", "src/edit_synthetic.py", "--prompt_str", prompt,
                    "--task", task_name, "--results_folder", "output/synth_editing"])
    return "output/synth_editing/reconstruction.png", "output/synth_editing/edit.png"
def set_visible_true():
    return gr.update(visible=True)

def set_visible_false():
    return gr.update(visible=False)
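
# These helpers return gr.update(...) so event handlers can toggle component visibility,
# revealing the UI step by step as the user progresses through the demo.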
# Gradio Blocks API
with gr.Blocks() as demo:
gr.HTML("""
Pix2Pix - Zero-shot Image-to-Image Translation
This is an unofficial demo for Pix2PixZero.
Please visit their website and github repo for more details.
""")
gr.HTML("""Duplicate this Space and upgrade to a GPU for fast Inference & no queue
""")
direction_html = gr.HTML(value="👇Upload a Dog or a Cat image here to get started👇
", visible=True)
refresh_direction_html = gr.HTML(value="👆Click the 'App' button to Refresh the space and try translation again with another Image
", visible=False)
    with gr.Row():
        image_in = gr.Image(type="pil", label="Start by uploading an image of a Cat or a Dog that you want to translate")
        #with gr.Box():
        #    gr.Examples(examples=[os.path.join(os.path.dirname(__file__), "dog11.jpg"), os.path.join(os.path.dirname(__file__), "cat11.jpg")],
        #                inputs=image_in,)
        #                #fn=inversion,)
        with gr.Column():
            btn_inversion = gr.Button("Get input noise and image caption", visible=False)
    with gr.Row():
        blip_prompt = gr.Textbox(visible=False)
        inversion_file = gr.File(visible=False)
        #task_name = gr.Textbox()
    with gr.Row():
        image_out = gr.Image(visible=False, label="Translated Image output")
        with gr.Column():
            task_name_radio = gr.Radio(choices=["cat2dog", "dog2cat"], type="value", visible=False, label="Select a task that you want to accomplish") #, value="cat2dog"),
            btn_imageedit = gr.Button(value="Translate the image!", visible=False)
    html_tag = gr.HTML(value="""
        🤩You can also Generate images with Stable Diffusion and 🚀Translate them on the fly🔥 (zero-shot) using Pix2PixZero. Try this below -
        Example - type a prompt like 'A small cat sitting on a blue ball', select the task as 'cat->dog' in this case, and press the button.""", visible=False)
    prompt_synth = gr.Textbox(visible=False, label="Type in a prompt to generate an Image using SD", placeholder="A small cat sitting on a blue ball")
    btn_synth_image = gr.Button(value="Generate & Translate the SD image", visible=False)
    with gr.Row():
        image_synth = gr.Image(visible=False, label="Synthetic image generated by Stable Diffusion on the fly")
        image_synth_translated = gr.Image(visible=False, label="Translated synthetic image")
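    # Event wiring: components are revealed progressively - upload an image, run inversion,
    # pick a task, translate, then optionally generate and translate a synthetic image.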
    image_in.change(set_visible_false, [], direction_html)
    btn_inversion.click(inversion, [image_in], [inversion_file, blip_prompt])
    #btn_inversion.click(set_visible_true, [], task_name_radio) #inversion_file, blip_prompt,
    btn_inversion.click(set_visible_false, [], btn_inversion)
    inversion_file.change(set_visible_true, [], task_name_radio) #inversion_file, blip_prompt,
    #task_initial_radio.change(set_visible_true, [], btn_imageedit)
    task_name_radio.change(set_visible_true, [], btn_imageedit)
    task_name_radio.change(set_visible_true, [], image_out)
    #task_name_radio.change(set_visible_true, [], html_tag)
    btn_imageedit.click(image_edit, [task_name_radio], [image_out])
    btn_imageedit.click(set_visible_false, [], btn_imageedit)
    btn_imageedit.click(set_visible_true, [], html_tag)
    btn_imageedit.click(set_visible_true, [], prompt_synth)
    btn_imageedit.click(set_visible_true, [], btn_synth_image)
    btn_imageedit.click(set_visible_true, [], image_synth)
    btn_imageedit.click(set_visible_true, [], image_synth_translated)
    image_out.change(set_visible_true, [], refresh_direction_html)
    btn_synth_image.click(synthetic_image_edit, [prompt_synth, task_name_radio], [image_synth, image_synth_translated])
    image_in.clear(set_visible_true, [], btn_inversion)
    image_in.change(set_visible_true, [], btn_inversion)
    image_in.change(set_visible_true, [], blip_prompt)
    image_in.change(set_visible_true, [], inversion_file)
demo.queue(concurrency_count=3)
demo.launch(debug=True)