Spaces: Runtime error

Update space id

- app.py +6 -13
- gradio_caption.py +7 -7
app.py
CHANGED
@@ -1,28 +1,21 @@
+import os
 import gradio as gr
-import torch

 from gradio_caption import create_demo as create_caption
 from gradio_vqa import create_demo as create_vqa


-css = """
-#img-display-input {
-    height: auto;
-    max-height: 40vh;
-}
-#img-display-output {
-    max-height: 40vh;
-}
-"""
-
-
 description = """
 # Prismer
 The official demo for **Prismer: A Vision-Language Model with An Ensemble of Experts**.
 Please refer to our [project page](https://shikun.io/projects/prismer) or [github](https://github.com/NVlabs/prismer) for more details.
 """

-with gr.Blocks(css=css) as demo:
+if (SPACE_ID := os.getenv('SPACE_ID')) is not None:
+    description += f'<p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings. <a href="https://huggingface.co/spaces/{SPACE_ID}?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a></p>'
+
+
+with gr.Blocks() as demo:
     gr.Markdown(description)
     with gr.Tab("Zero-shot Image Captioning"):
         create_caption()
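The SPACE_ID check added above is the usual Hugging Face Spaces pattern: the Spaces runtime exposes the space's owner/name through the SPACE_ID environment variable, so the duplicate-space badge is only appended when the app actually runs on Spaces; locally the variable is unset and the description stays unchanged. A minimal, self-contained sketch of the same pattern, with a placeholder description standing in for the real one in app.py:

import os

# Placeholder description, standing in for the Markdown text in app.py.
description = "# Prismer demo"

# SPACE_ID is set by the Hugging Face Spaces runtime (e.g. "owner/space-name")
# and is unset when running locally, so the badge is only added on Spaces.
if (SPACE_ID := os.getenv('SPACE_ID')) is not None:
    duplicate_url = f"https://huggingface.co/spaces/{SPACE_ID}?duplicate=true"
    description += f'\n\n[Duplicate this Space]({duplicate_url}) to skip the queue.'

print(description)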
gradio_caption.py
CHANGED
@@ -12,18 +12,18 @@ def create_demo():
     with gr.Row():
         with gr.Column(scale=1):
             model_type = gr.Dropdown(["Prismer-Base", "Prismer-Large"], label="Model Size", value="Prismer-Base")
-            rgb = gr.Image(label="Input Image", type='pil'
+            rgb = gr.Image(label="Input Image", type='pil')
             submit = gr.Button("Submit")
         with gr.Column(scale=2):
             pred = gr.Textbox(label="Model Prediction")
             with gr.Row():
-                depth = gr.Image(label="Depth"
-                edge = gr.Image(label="Edge"
-                normals = gr.Image(label="Normals"
+                depth = gr.Image(label="Depth")
+                edge = gr.Image(label="Edge")
+                normals = gr.Image(label="Normals")
             with gr.Row():
-                seg = gr.Image(label="Segmentation"
-                obj_det = gr.Image(label="Object Detection"
-                ocr_det = gr.Image(label="OCR Detection"
+                seg = gr.Image(label="Segmentation")
+                obj_det = gr.Image(label="Object Detection")
+                ocr_det = gr.Image(label="OCR Detection")

     def on_submit(im, model_type):
         return pred, depth, edge, normals, seg, obj_det, ocr_det
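The hunk above only shows the layout and a stub on_submit that echoes the component handles; the button wiring itself sits outside the hunk. As a rough sketch of how such a Blocks layout is typically connected in Gradio (the handler body and the .click() call below are illustrative assumptions, not code from the repo):

import gradio as gr


def create_demo():
    with gr.Row():
        with gr.Column(scale=1):
            model_type = gr.Dropdown(["Prismer-Base", "Prismer-Large"],
                                     label="Model Size", value="Prismer-Base")
            rgb = gr.Image(label="Input Image", type='pil')
            submit = gr.Button("Submit")
        with gr.Column(scale=2):
            pred = gr.Textbox(label="Model Prediction")

    def on_submit(im, model_type):
        # Placeholder handler: the real app runs the selected Prismer model
        # on `im` and returns the caption plus the expert visualisations.
        if im is None:
            return "Please upload an image."
        return f"({model_type}) caption for a {im.width}x{im.height} image"

    # Assumed wiring, not shown in the hunk: .click() maps the listed input
    # components to the handler's arguments and its return value to `pred`.
    submit.click(fn=on_submit, inputs=[rgb, model_type], outputs=[pred])


if __name__ == '__main__':
    with gr.Blocks() as demo:
        create_demo()
    demo.launch()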