dhmeltzer committed
Commit 822b7d2
1 Parent(s): a0b5df2

Update app.py

Files changed (1): app.py +12 -0
app.py CHANGED
@@ -5,6 +5,18 @@ import openai
 
 def main():
     st.title("Scientific Question Generation")
+
+    st.write("This application is designed to generate a question given a piece of scientific text. \
+        We include the output from four different models: the [BART-Large](https://huggingface.co/dhmeltzer/bart-large_askscience-qg) and FLAN-T5-Base models \
+        fine-tuned on the r/AskScience split of the [ELI5 dataset](https://huggingface.co/datasets/eli5), as well as the zero-shot output \
+        of the [FLAN-T5-XXL](https://huggingface.co/google/flan-t5-xxl) model and the [GPT-3.5-turbo](https://platform.openai.com/docs/models/gpt-3-5) model.\
+        \n \
+        For a more thorough discussion of question generation, see this [report](https://wandb.ai/dmeltzer/Question_Generation/reports/Exploratory-Data-Analysis-for-r-AskScience--Vmlldzo0MjQwODg1?accessToken=fndbu2ar26mlbzqdphvb819847qqth2bxyi4hqhugbnv97607mj01qc7ed35v6w8) on EDA and this \
+        [report](https://api.wandb.ai/links/dmeltzer/7an677es) on our training procedure.\
+        \n \
+        **Disclaimer**: You may receive an error message when you first run the model. We are using the Hugging Face Inference API to access the BART-Large and FLAN-T5 models, and the API takes around 20 seconds to load a model. \
+        In addition, the FLAN-T5-XXL model was recently updated on Hugging Face and may give buggy outputs.\
+        ")
 
     checkpoints = ['dhmeltzer/bart-large_askscience-qg',
                    'dhmeltzer/flan-t5-base_askscience-qg',
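
Aside: the disclaimer added in this commit refers to the Hugging Face Inference API taking roughly 20 seconds to load a model on the first request. The sketch below is illustrative only and is not the code in this commit: a minimal requests-based call against the public Inference API endpoint that waits and retries while the model is loading. The token placeholder, the query helper name, and the retry policy are assumptions.

import time
import requests

# Hypothetical example for one of the checkpoints listed in app.py.
API_URL = "https://api-inference.huggingface.co/models/dhmeltzer/bart-large_askscience-qg"
HEADERS = {"Authorization": "Bearer <HF_API_TOKEN>"}  # placeholder, not a real credential

def query(text: str, max_retries: int = 3) -> dict:
    """POST text to the Inference API, retrying while the model is still loading."""
    payload = {"inputs": text}
    for _ in range(max_retries):
        response = requests.post(API_URL, headers=HEADERS, json=payload)
        result = response.json()
        # While the model is warming up, the API responds with an error body that
        # includes an "estimated_time" field (the ~20 s mentioned in the disclaimer).
        if isinstance(result, dict) and "estimated_time" in result:
            time.sleep(result["estimated_time"])
            continue
        return result
    raise RuntimeError("Model did not finish loading after several retries")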