import gradio as gr
from transformers import pipeline

# Extractive question-answering pipeline backed by deepset/roberta-base-squad2.
model_name = "deepset/roberta-base-squad2"
nlp = pipeline("question-answering", model=model_name, tokenizer=model_name)

def chat(context, question):
    # Build the input the QA pipeline expects and return only the answer span.
    QA_input = {
        "question": question,
        "context": context,
    }
    res = nlp(QA_input)
    return res["answer"]
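
# A quick sanity check one could run locally (kept commented out so it does not
# execute on import); the context and question below are made-up example strings:
#   print(chat(
#       "Gradio is a Python library for building simple web demos around ML models.",
#       "What is Gradio used for?",
#   ))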

screen = gr.Interface(
    fn=chat,
    inputs=[
        gr.Textbox(lines=8, placeholder="Enter your context here"),
        gr.Textbox(lines=2, placeholder="Enter your question here"),
    ],
    outputs=gr.Textbox(lines=10, placeholder="Your answer will appear here"),
    title="QnA with roberta-base-squad2",
    description="This app performs simple QnA over the context you provide.",
    theme="soft",
    article="""### Disclaimer: This model is used purely for QnA. Paste the text you want the answer drawn from into the context section. <br> Then paste your question into the question section. <br> The answer will appear in the output section.""",
)

screen.launch()
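
# How to run (a sketch; assumes this script is saved as app.py and that gradio,
# transformers, and torch are installed):
#   pip install gradio transformers torch
#   python app.py
# Gradio then serves the UI locally, by default at http://127.0.0.1:7860.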