import gradio as gr
import torch

from gradio_depth_pred import create_demo as create_depth_pred_demo

css = """
#img-display-container {
    max-height: 50vh;
}
#img-display-input {
    max-height: 40vh;
}
#img-display-output {
    max-height: 40vh;
}
"""

DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
# ZoeD_N is the ZoeDepth checkpoint fine-tuned on NYU Depth v2.
model = torch.hub.load('isl-org/ZoeDepth', "ZoeD_N", pretrained=True).to(DEVICE).eval()

title = "# ZoeDepth"
description = """Unofficial demo for **ZoeDepth: Zero-shot Transfer by Combining Relative and Metric Depth**.
ZoeDepth is a deep learning model for metric depth estimation from a single image.
Please refer to the [paper](https://arxiv.org/abs/2302.12288) or the [GitHub repository](https://github.com/isl-org/ZoeDepth) for more details."""

with gr.Blocks(css=css) as demo:
    gr.Markdown(title)
    gr.Markdown(description)
    with gr.Tab("Depth Prediction"):
        create_depth_pred_demo(model)
    gr.HTML('''


        <p>You can duplicate this Space to skip the queue: Duplicate Space</p>
        <p>visitors</p>

    ''')

if __name__ == '__main__':
    demo.queue().launch()
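

# ---------------------------------------------------------------------------
# Reference sketch (assumption): `gradio_depth_pred.create_demo` is imported
# above, but its source is not part of this file. A minimal implementation
# could look like the function below. It assumes the ZoeDepth hub model
# exposes `infer_pil`, which returns a metric depth map as a NumPy array,
# and renders the result as a grayscale image purely for display. The
# `elem_id` values match the CSS selectors defined at the top of this file.
# This sketch is illustrative only and is not called by this script.
# ---------------------------------------------------------------------------
def _example_create_demo(model):
    import numpy as np
    from PIL import Image

    def predict_depth(image: Image.Image) -> Image.Image:
        # Run single-image metric depth inference with the loaded ZoeDepth model.
        depth = model.infer_pil(image)
        # Normalize to [0, 255] for visualization; raw values are in meters.
        norm = (depth - depth.min()) / (depth.max() - depth.min() + 1e-8)
        return Image.fromarray((norm * 255).astype(np.uint8))

    # Intended to be called inside an active gr.Blocks() context,
    # as create_depth_pred_demo(model) is above.
    with gr.Row(elem_id="img-display-container"):
        input_image = gr.Image(type="pil", label="Input Image", elem_id="img-display-input")
        output_depth = gr.Image(label="Predicted Depth", elem_id="img-display-output")
    submit = gr.Button("Predict Depth")
    submit.click(fn=predict_depth, inputs=input_image, outputs=output_depth)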