Update app.py
app.py CHANGED
@@ -8,7 +8,7 @@ menu = st.sidebar.radio("", options=["Introduction", "Parsing NLU data into SQuA
     "Evaluation"], index=0)


-if menu == "Introduction":
+if menu == "Demo":

     st.markdown('''

@@ -41,7 +41,35 @@ if menu == "Introduction":
     NLU dataset (e.g. utterances and intent / slot annotations) into [SQuAD 2.0 format](https://rajpurkar.github.io/SQuAD-explorer/explore/v2.0/dev/)
     question-answering data that can be used by QANLU.

+    Feel free to query the pre-trained QA-NLU model using the buttons below.
+
+    *Please note that this model has been trained on ATIS and may need to be further fine-tuned to support intents and slots that are not covered in ATIS*.
     ''')
+
+    tokenizer = AutoTokenizer.from_pretrained("AmazonScience/qanlu")
+
+    model = AutoModelForQuestionAnswering.from_pretrained("AmazonScience/qanlu")
+
+    qa_pipeline = pipeline('question-answering', model=model, tokenizer=tokenizer)
+
+    context = st.text_input(
+        'Please enter the context (remember to include "Yes. No. " in the beginning):',
+        value="Yes. No. I want a cheap flight to Boston."
+    )
+    question = st.text_input(
+        'Please enter the intent question:',
+        value="Are they looking for a flight?"
+    )
+
+
+    qa_input = {
+        'context': context,
+        'question': question
+    }
+
+    if st.button('Ask QANLU'):
+        answer = qa_pipeline(qa_input)
+        st.write(answer)

 elif menu == "Parsing NLU data into SQuAD 2.0":
     st.header('QA-NLU Data Parsing')
@@ -167,32 +195,6 @@ elif menu == "Evaluation":
     st.markdown('''
     To assess the performance of the trained model, we can use the `calculate_pr.py` script from the [QA-NLU Amazon Research repository](https://github.com/amazon-research/question-answering-nlu).

-    Feel free to query the pre-trained QA-NLU model
-
-    *Please note that this model has been trained on ATIS and may need to be further fine-tuned to support intents and slots that are not covered in ATIS*.
+    Feel free to query the pre-trained QA-NLU model in the Demo section.
     ''')
-
-    tokenizer = AutoTokenizer.from_pretrained("AmazonScience/qanlu")
-
-    model = AutoModelForQuestionAnswering.from_pretrained("AmazonScience/qanlu")
-
-    qa_pipeline = pipeline('question-answering', model=model, tokenizer=tokenizer)
-
-    context = st.text_input(
-        'Please enter the context (remember to include "Yes. No. " in the beginning):',
-        value="Yes. No. I want a cheap flight to Boston."
-    )
-    question = st.text_input(
-        'Please enter the intent question:',
-        value="Are they looking for a flight?"
-    )
-
-
-    qa_input = {
-        'context': context,
-        'question': question
-    }
-
-    if st.button('Ask QANLU'):
-        answer = qa_pipeline(qa_input)
-        st.write(answer)
+
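
The Demo branch added above is a standard extractive question-answering call through the Hugging Face transformers pipeline. The sketch below shows the same query as a standalone script outside Streamlit; the model name, the example context, and the "Yes. No. " prefix are copied from the diff, and the comment on the returned fields describes the generic output of a transformers question-answering pipeline rather than anything specific to this Space.

    # Minimal sketch: the Demo query as a standalone script (no Streamlit).
    # Model name, context, and question are copied from the diff above.
    from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline

    tokenizer = AutoTokenizer.from_pretrained("AmazonScience/qanlu")
    model = AutoModelForQuestionAnswering.from_pretrained("AmazonScience/qanlu")
    qa_pipeline = pipeline('question-answering', model=model, tokenizer=tokenizer)

    # The "Yes. No. " prefix gives the extractive model literal "Yes"/"No"
    # spans to select when answering an intent (yes/no) question.
    answer = qa_pipeline({
        'context': 'Yes. No. I want a cheap flight to Boston.',
        'question': 'Are they looking for a flight?',
    })
    print(answer)  # a dict with 'score', 'start', 'end', and 'answer'

Slot values can be queried the same way with a span question such as "What city do they want to fly to?", though that particular wording is only an illustration, not a template taken from the app.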
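
For context on the "Parsing NLU data into SQuAD 2.0" section that the diff leaves unchanged: the app converts utterances with intent and slot annotations into SQuAD 2.0-style question-answering records. The sketch below is purely illustrative; the JSON structure follows the SQuAD 2.0 format linked from the app, but the question templates, ids, and title are hypothetical and not taken from the Space's parser.

    # Illustrative sketch of one SQuAD 2.0-style record built from an NLU example.
    # Only the JSON structure follows the SQuAD 2.0 format; the question wording,
    # ids, and title below are hypothetical.
    import json

    utterance = "I want a cheap flight to Boston."
    context = "Yes. No. " + utterance   # same prefix convention as the demo

    squad_data = {
        "version": "v2.0",
        "data": [{
            "title": "nlu_example",      # hypothetical
            "paragraphs": [{
                "context": context,
                "qas": [
                    {   # intent question, answerable by the "Yes"/"No" prefix
                        "id": "example-intent-flight",
                        "question": "Are they looking for a flight?",
                        "answers": [{"text": "Yes", "answer_start": 0}],
                        "is_impossible": False,
                    },
                    {   # slot question, answered by a span of the utterance
                        "id": "example-slot-destination",
                        "question": "What is the destination city?",
                        "answers": [{"text": "Boston",
                                     "answer_start": context.index("Boston")}],
                        "is_impossible": False,
                    },
                ],
            }],
        }],
    }

    print(json.dumps(squad_data, indent=2))

SQuAD 2.0 also allows unanswerable questions (is_impossible set to true with an empty answer list), which is how a slot that does not appear in a given utterance could be represented.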