import gradio as gr
from PIL import Image


def create_gradio_interface(process_and_generate):
    """Build the Gradio demo around a caller-supplied process_and_generate callable."""

    def gradio_process_and_generate(input_image, prompt, num_images, cfg_weight):
        # Thin wrapper so the Gradio event handler forwards UI inputs to the injected pipeline.
        return process_and_generate(input_image, prompt, num_images, cfg_weight)

    explanation = """Janus 1.3B uses a different visual encoder for understanding and generation.
![Janus Model Architecture](file/images/janus_architecture.svg)
Here, by feeding the model an image and then asking it to generate that same image, we visualize the model's ability to translate its input (understanding) embedding space into its generative embedding space."""
    with gr.Blocks() as demo:
        gr.Markdown("# How Janus-1.3B sees itself")
        with gr.Row():
            input_image = gr.Image(type="filepath", label="Input Image")
            output_images = gr.Gallery(label="Generated Images", columns=2, rows=2)
        prompt = gr.Textbox(label="Prompt", value="Exactly what is shown in the image.")
        num_images = gr.Slider(minimum=1, maximum=12, value=12, step=1, label="Number of Images to Generate")
        cfg_weight = gr.Slider(minimum=1, maximum=10, value=5, step=0.1, label="CFG Weight")
        generate_btn = gr.Button("Generate", variant="primary", size="lg")
        generate_btn.click(
            fn=gradio_process_and_generate,
            inputs=[input_image, prompt, num_images, cfg_weight],
            outputs=output_images,
        )
        gr.Markdown(explanation)

    return demo
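

# Hypothetical usage sketch (not part of the original file): the Space is assumed to
# supply its own process_and_generate callable, taking an image path, a prompt, the
# number of images, and the CFG weight, and returning a list of PIL images for the
# gallery. The dummy stub below only illustrates that expected signature so the
# layout can be launched standalone for testing.
if __name__ == "__main__":
    def dummy_process_and_generate(input_image, prompt, num_images, cfg_weight):
        # Placeholder: echo the input image num_images times instead of running
        # the actual Janus generation pipeline.
        img = Image.open(input_image)
        return [img] * int(num_images)

    create_gradio_interface(dummy_process_and_generate).launch()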