Towhidul committed on
Commit 0d89b24
1 Parent(s): ed05241

Update app.py

Files changed (1)
  1. app.py +10 -10
app.py CHANGED
@@ -21,7 +21,7 @@ def query(payload):
 
 #-----------------------------------------------------------
 
-API_URL_evidence = "https://api-inference.huggingface.co/models/google/flan-t5-xxl"
+API_URL_evidence = "https://api-inference.huggingface.co/models/t5-3b"
 headers_evidence = {"Authorization": HF_SPACES_API_KEY}
 
 def query_evidence(payload):
@@ -102,11 +102,11 @@ claim_text=st.text_area("Enter your claim:", on_change=proc, key='text_key')
 # if evidence_text:
 #     st.write("You entered: ", evidence_text)
 
-# evidence_text=st.text_area("Enter your evidence:")
+evidence_text=st.text_area("Enter your evidence:")
 
-form_evidence = st.form(key='my_evidence')
-form_evidence.text_input(label='Enter your evidence')
-evidence_text = form_evidence.form_submit_button(label='Submit')
+# form_evidence = st.form(key='my_evidence')
+# form_evidence.text_input(label='Enter your evidence')
+# evidence_text = form_evidence.form_submit_button(label='Submit')
 
 if evidence_text:
     st.caption(':green[Kindly hold on for a few minutes while the QA pairs are being generated]')
@@ -384,7 +384,7 @@ def gen_qa_who(df):
         list_of_ans_who.append(f"""Ans{j+1}:{answer[j]}""")
         input_evidence = f"answer_the_next_question_from_context: {question_ids} context: {evidence}"
         #time.sleep(10)
-        answer_evidence = query_evidence({"inputs":input_evidence,"truncation":True,"wait_for_model":True})[0]['generated_text']
+        answer_evidence = query_evidence({"inputs":input_evidence,"truncation":True,"wait_for_model":True})[0]['translation_text']
         if answer_evidence.lower() in evidence.lower():
             list_of_evidence_answer_who.append(f"""Evidence{j+1}:{answer_evidence}""")
         else:
@@ -444,7 +444,7 @@ def gen_qa_what(df):
         list_of_ans_what.append(f"""Ans{j+1}:{answer[j]}""")
         input_evidence = f"answer_the_next_question_from_context: {question_ids} context: {evidence}"
         #time.sleep(10)
-        answer_evidence = query_evidence({"inputs":input_evidence,"truncation":True,"wait_for_model":True})[0]['generated_text']
+        answer_evidence = query_evidence({"inputs":input_evidence,"truncation":True,"wait_for_model":True})[0]['translation_text']
         if answer_evidence.lower() in evidence.lower():
             list_of_evidence_answer_what.append(f"""Evidence{j+1}:{answer_evidence}""")
 
@@ -506,7 +506,7 @@ def gen_qa_why(df):
         list_of_ans_why.append(f"""Ans{j+1}:{answer[j]}""")
         input_evidence = f"answer_the_next_question_from_context: {question_ids} context: {evidence}"
         #time.sleep(10)
-        answer_evidence = query_evidence({"inputs":input_evidence,"truncation":True,"wait_for_model":True})[0]['generated_text']
+        answer_evidence = query_evidence({"inputs":input_evidence,"truncation":True,"wait_for_model":True})[0]['translation_text']
         if answer_evidence.lower() in evidence.lower():
             list_of_evidence_answer_why.append(f"""Evidence{j+1}:{answer_evidence}""")
         else:
@@ -567,7 +567,7 @@ def gen_qa_when(df):
         list_of_ans_when.append(f"""Ans{j+1}:{answer[j]}""")
         input_evidence = f"answer_the_next_question_from_context: {question_ids} context: {evidence}"
         #time.sleep(10)
-        answer_evidence = query_evidence({"inputs":input_evidence,"truncation":True,"wait_for_model":True})[0]['generated_text']
+        answer_evidence = query_evidence({"inputs":input_evidence,"truncation":True,"wait_for_model":True})[0]['translation_text']
         if answer_evidence.lower() in evidence.lower():
             list_of_evidence_answer_when.append(f"""Evidence{j+1}:{answer_evidence}""")
         else:
@@ -628,7 +628,7 @@ def gen_qa_where(df):
         list_of_ans_where.append(f"""Ans{j+1}:{answer[j]}""")
         input_evidence = f"answer_the_next_question_from_context: {question_ids} context: {evidence}"
         #time.sleep(10)
-        answer_evidence = query_evidence({"inputs":input_evidence,"truncation":True,"wait_for_model":True})[0]['generated_text']
+        answer_evidence = query_evidence({"inputs":input_evidence,"truncation":True,"wait_for_model":True})[0]['translation_text']
         if answer_evidence.lower() in evidence.lower():
             list_of_evidence_answer_where.append(f"""Evidence{j+1}:{answer_evidence}""")
         else:
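
Two related changes run through this commit: the evidence model moves from google/flan-t5-xxl to t5-3b, and every call site switches from reading 'generated_text' to 'translation_text'. The second follows from the first: flan-t5-xxl is served by the Inference API as a text2text-generation model and returns 'generated_text', while the diff implies t5-3b is served under a translation pipeline, whose responses expose 'translation_text' instead. Below is a minimal sketch of how the updated helper and parsing fit together; the URL, header name, and payload shape mirror the diff, whereas the environment-variable lookup and the key fallback are illustrative assumptions rather than code from app.py.

import os
import requests

# Assumption: the token arrives via a Space secret / environment variable and
# already includes the "Bearer " prefix expected by the Authorization header.
HF_SPACES_API_KEY = "Bearer " + os.environ.get("HF_TOKEN", "")

API_URL_evidence = "https://api-inference.huggingface.co/models/t5-3b"
headers_evidence = {"Authorization": HF_SPACES_API_KEY}

def query_evidence(payload):
    # POST the JSON payload to the Inference API and return the decoded response.
    response = requests.post(API_URL_evidence, headers=headers_evidence, json=payload)
    return response.json()

# Illustrative call in the same shape the gen_qa_* functions use.
input_evidence = "answer_the_next_question_from_context: Q1: Who signed the bill? context: The bill was signed by the governor."
result = query_evidence({"inputs": input_evidence, "truncation": True, "wait_for_model": True})
# t5-3b (translation pipeline) returns 'translation_text'; flan-t5-xxl returned 'generated_text'.
answer_evidence = result[0].get("translation_text") or result[0].get("generated_text", "")
print(answer_evidence)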
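
The evidence-input change is also worth a note: st.form_submit_button returns a boolean (True on the rerun triggered by the click), so the form-based version bound evidence_text to a click flag rather than to the text typed into the form's text_input; reverting to st.text_area makes evidence_text the evidence string itself, which is what the downstream if evidence_text: guard and the QA functions expect. A minimal standalone sketch of the pattern now in use, with the widget label and caption taken from the diff and the rest illustrative:

import streamlit as st

# st.text_area returns the current contents as a string ("" until the user types),
# so the guard below only fires once real evidence has been entered.
evidence_text = st.text_area("Enter your evidence:")

if evidence_text:
    st.caption(':green[Kindly hold on for a few minutes while the QA pairs are being generated]')
    # ... QA-pair generation against the entered evidence would run here ...

Had the form been kept, the evidence string would have to be read from the text_input widget itself (for example via a key and st.session_state), with the submit button serving only as the trigger.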