Update app.py
app.py
CHANGED
@@ -1,29 +1,31 @@
-#
-# https://learn.deeplearning.ai/courses/open-source-models-hugging-face/lesson/
+# Visual question answering from
+# https://learn.deeplearning.ai/courses/open-source-models-hugging-face/lesson/13/multimodal-visual-question-answering
 #
 
-from transformers import
-model =
+from transformers import BlipForQuestionAnswering
+model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")
 
 from transformers import AutoProcessor
-processor = AutoProcessor.from_pretrained("Salesforce/blip-
+processor = AutoProcessor.from_pretrained("Salesforce/blip-vqa-base")
 
 from PIL import Image
+
 import gradio as gr
 
-def
-
-
-    output = processor.decode(
+def answering(image, question):
+    inputs = processor(image, question, return_tensors="pt")
+    out = model.generate(**inputs)
+    output = processor.decode(out[0], skip_special_tokens=True)
     return output
 
 gr.close_all()
 
-app = gr.Interface(fn=
-    inputs=[gr.Image(label="
-
-
-
-
+app = gr.Interface(fn=answering,
+                   inputs=[gr.Image(label="Picture here", type="pil"),
+                           gr.Textbox(label="Question about picture here")],
+                   outputs=[gr.Textbox(label="Answer"),],
+                   title="Harza's application for answering questions about pictures",
+                   description="Harza's miracle application that can answer questions about a given picture!",
+                   allow_flagging="never")
 app.launch()
-gr.close_all()
+gr.close_all()
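As a quick sanity check of the change, the new answering() function can also be called directly, without launching the Gradio UI. A minimal sketch, assuming the model, processor, and answering() definitions from the updated app.py have already been executed (for example in a notebook), and that "example.jpg" is a placeholder path for any local image:

from PIL import Image

# "example.jpg" is a placeholder; substitute any local image file.
image = Image.open("example.jpg")

# Ask a free-form question about the image; the BLIP VQA model returns a short text answer.
print(answering(image, "How many people are in the picture?"))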