nielsr and merve committed
Commit
03287b2
1 Parent(s): e927f5e

Improve description on what the model does (#1)


- Improve description on what the model does (5a1db4f1475380f4ee27143490be34b97e5c20e3)


Co-authored-by: Merve Noyan <[email protected]>

Files changed (1)
  1. app.py +1 -1
app.py CHANGED
@@ -26,7 +26,7 @@ answer = gr.outputs.Textbox(label="Predicted answer")
 examples = [["cats.jpg", "How many cats are there?"]]
 
 title = "Interactive demo: ViLT"
-description = "Gradio Demo for ViLT (Vision and Language Transformer), fine-tuned on VQAv2. To use it, simply upload your image and type a question and click 'submit', or click one of the examples to load them. Read more at the links below."
+description = "Gradio Demo for ViLT (Vision and Language Transformer), fine-tuned on VQAv2, a model that can answer questions from images. To use it, simply upload your image and type a question and click 'submit', or click one of the examples to load them. Read more at the links below."
 article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2102.03334' target='_blank'>ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision</a> | <a href='https://github.com/dandelin/ViLT' target='_blank'>Github Repo</a></p>"
 
 interface = gr.Interface(fn=answer_question,
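
For context, a minimal sketch of the answer_question function that the gr.Interface above wraps. The rest of app.py is not part of this diff, so the checkpoint name (dandelin/vilt-b32-finetuned-vqa) and the function body are assumptions based on the standard transformers ViLT API, not the Space's actual code.

from transformers import ViltProcessor, ViltForQuestionAnswering

# Assumed checkpoint: the VQAv2-finetuned ViLT released by the paper authors.
processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
model = ViltForQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa")

def answer_question(image, question):
    # Encode the image-question pair into a single multimodal input.
    encoding = processor(image, question, return_tensors="pt")
    outputs = model(**encoding)
    # The highest-scoring logit indexes into the VQAv2 answer vocabulary.
    idx = outputs.logits.argmax(-1).item()
    return model.config.id2label[idx]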