import time

import gradio as gr
from gradio_client import Client, handle_file
from huggingface_hub import HfApi

api = HfApi()
repo_ids = [
    "HuggingFaceH4/idefics2-8b-playground",
    "HuggingFaceH4/idefics2-8b-vdpoed-playground",
]

# Restart any Space that is not currently running
for repo_id in repo_ids:
    if api.space_info(repo_id).runtime.stage != "RUNNING":
        api.restart_space(repo_id=repo_id)

# Wait until both Spaces are running before connecting the clients
for repo_id in repo_ids:
    while api.space_info(repo_id).runtime.stage != "RUNNING":
        time.sleep(1)

client_idefics2 = Client("HuggingFaceH4/idefics2-8b-playground")
client_idefics2_dpoed = Client("HuggingFaceH4/idefics2-8b-vdpoed-playground")


def respond(multimodal_input):
    # Forward the text and attached images to both Spaces and return both answers
    x = {
        "text": multimodal_input["text"],
        "files": [handle_file(file) for file in multimodal_input["files"]],
    }
    text_1 = client_idefics2.predict(x, api_name="/predict")
    text_2 = client_idefics2_dpoed.predict(x, api_name="/predict")
    return text_1, text_2


gr.Interface(
    respond,
    title="Compare IDEFICS2-8B Against DPO",
    description="",
    inputs=[gr.MultimodalTextbox(file_types=["image"], show_label=False)],
    outputs=[
        gr.Textbox(label="idefics2-8b"),
        gr.Textbox(label="idefics2-8b-dpoed"),
    ],
    examples=[
        {"text": "Describe this image in detail.", "files": [{"path": "./bee.jpg"}]}
    ],
).launch()