nguyennghia0902 committed on
Commit
b1af872
1 Parent(s): 1aee72d

Update SampleQA.py

Files changed (1)
  1. SampleQA.py +65 -74
SampleQA.py CHANGED
@@ -1,75 +1,66 @@
- from os import path
- import streamlit as st
- import tensorflow as tf
- import random
- from transformers import ElectraTokenizerFast, TFElectraForQuestionAnswering
- from datasets import Dataset, DatasetDict, load_dataset
-
- model_hf = "nguyennghia0902/bestfailed_electra-small-discriminator_5e-05_16"
- tokenizer = ElectraTokenizerFast.from_pretrained(model_hf)
- reload_model = TFElectraForQuestionAnswering.from_pretrained(model_hf)
-
- @st.cache_resource
- def predict(question, context):
-     inputs = tokenizer(question, context, return_offsets_mapping=True,return_tensors="tf",max_length=512, truncation=True)
-     offset_mapping = inputs.pop("offset_mapping")
-     outputs = reload_model(**inputs)
-     answer_start_index = int(tf.math.argmax(outputs.start_logits, axis=-1)[0])
-     answer_end_index = int(tf.math.argmax(outputs.end_logits, axis=-1)[0])
-     start_char = offset_mapping[0][answer_start_index][0]
-     end_char = offset_mapping[0][answer_end_index][1]
-     predicted_answer_text = context[start_char:end_char]
-
-     return predicted_answer_text
-
- def main():
-     st.set_page_config(page_title="Sample in Dataset", page_icon="📝")
-
-     # giving a title to our page
-     col1, col2 = st.columns([2, 1])
-     col1.title("Sample in Dataset")
-
-     new_data = load_dataset("nguyennghia0902/project02_textming_dataset", data_files={'train': 'raw_newformat_data/traindata-00000-of-00001.arrow', 'test': 'raw_newformat_data/testdata-00000-of-00001.arrow'})
-
-     sampleQ = ""
-     sampleC = ""
-     sampleA = ""
-     if st.button("Sample"):
-         sample = random.choice(new_data['test'])
-         sampleQ = sample['question']
-         sampleC = sample['context']
-         sampleA = sample['answers']["text"][0]
-
-     question = st.text_area(
-         "Sample QUESTION: ",
-         sampleQ,
-         height=15,
-     )
-     text = st.text_area(
-         "Sample CONTEXT:",
-         sampleC,
-         height=100,
-     )
-     answer = st.text_area(
-         "True ANSWER:",
-         sampleA,
-         height=20,
-     )
-
-     # Create a prediction button
-     if st.button("Predict"):
-         prediction = ""
-         stripped_text = text.strip()
-         if not stripped_text:
-             st.error("Please enter a context.")
-             return
-         stripped_question = question.strip()
-         if not stripped_question:
-             st.error("Please enter a question.")
-             return
-
-         prediction = predict(stripped_question, stripped_text)
-         st.success(prediction)
-
- if __name__ == "__main__":
+ from os import path
+ import streamlit as st
+ import tensorflow as tf
+ import random
+ from transformers import ElectraTokenizerFast, TFElectraForQuestionAnswering
+ from datasets import Dataset, DatasetDict, load_dataset
+
+ model_hf = "nguyennghia0902/bestfailed_electra-small-discriminator_5e-05_16"
+ tokenizer = ElectraTokenizerFast.from_pretrained(model_hf)
+ reload_model = TFElectraForQuestionAnswering.from_pretrained(model_hf)
+
+ @st.cache_resource
+ def predict(question, context):
+     inputs = tokenizer(question, context, return_offsets_mapping=True,return_tensors="tf",max_length=512, truncation=True)
+     offset_mapping = inputs.pop("offset_mapping")
+     outputs = reload_model(**inputs)
+     answer_start_index = int(tf.math.argmax(outputs.start_logits, axis=-1)[0])
+     answer_end_index = int(tf.math.argmax(outputs.end_logits, axis=-1)[0])
+     start_char = offset_mapping[0][answer_start_index][0]
+     end_char = offset_mapping[0][answer_end_index][1]
+     predicted_answer_text = context[start_char:end_char]
+
+     return predicted_answer_text
+
+ def main():
+     st.set_page_config(page_title="Sample in Dataset", page_icon="📝")
+
+     # giving a title to our page
+     col1, col2 = st.columns([2, 1])
+     col1.title("Sample in Dataset")
+
+     new_data = load_dataset("nguyennghia0902/project02_textming_dataset", data_files={'train': 'raw_newformat_data/traindata-00000-of-00001.arrow', 'test': 'raw_newformat_data/testdata-00000-of-00001.arrow'})
+
+     sampleQ = ""
+     sampleC = ""
+     sampleA = ""
+     if st.button("Sample"):
+         sample = random.choice(new_data['test'])
+         sampleQ = sample['question']
+         sampleC = sample['context']
+         sampleA = sample['answers']["text"][0]
+
+     question = st.text_area(
+         "Sample QUESTION: ",
+         sampleQ,
+         height=15,
+     )
+     text = st.text_area(
+         "Sample CONTEXT:",
+         sampleC,
+         height=100,
+     )
+     answer = st.text_area(
+         "True ANSWER:",
+         sampleA,
+         height=20,
+     )
+
+     # Create a prediction button
+     if st.button("Predict"):
+         prediction = ""
+         prediction = predict(sampleQ, sampleC)
+         st.success(prediction)
+
+ if __name__ == "__main__":
      main()
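
The predict helper in this file follows the standard extractive-QA decoding pattern: tokenize the question/context pair with character offsets, take the argmax of the start and end logits, and slice the context at the corresponding character span. Below is a minimal standalone sketch of that same flow outside Streamlit; the model ID and decoding steps are copied from the file above, while the answer() wrapper name and the sample question/context strings are illustrative assumptions. Running it needs tensorflow, transformers, and network access to the Hugging Face Hub.

# Standalone sketch (not part of this commit): exercise the same
# extractive-QA decoding that SampleQA.py uses, on one made-up example.
import tensorflow as tf
from transformers import ElectraTokenizerFast, TFElectraForQuestionAnswering

model_hf = "nguyennghia0902/bestfailed_electra-small-discriminator_5e-05_16"
tokenizer = ElectraTokenizerFast.from_pretrained(model_hf)
model = TFElectraForQuestionAnswering.from_pretrained(model_hf)

def answer(question, context):
    inputs = tokenizer(question, context, return_offsets_mapping=True,
                       return_tensors="tf", max_length=512, truncation=True)
    offset_mapping = inputs.pop("offset_mapping")  # token -> character spans
    outputs = model(**inputs)
    # Greedy decoding: highest-scoring start and end token positions.
    start_tok = int(tf.math.argmax(outputs.start_logits, axis=-1)[0])
    end_tok = int(tf.math.argmax(outputs.end_logits, axis=-1)[0])
    start_char = int(offset_mapping[0][start_tok][0])
    end_char = int(offset_mapping[0][end_tok][1])
    return context[start_char:end_char]

# Made-up sample (the app instead draws a random record from the test split).
context = "Hà Nội là thủ đô của Việt Nam."
question = "Thủ đô của Việt Nam là gì?"
print(answer(question, context))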