dhmeltzer committed
Commit 1d1bc23
1 Parent(s): 3d891e3

Update app.py

Files changed (1)
app.py +31 -5
app.py CHANGED
@@ -1,6 +1,7 @@
 import numpy as np
 import requests
 import streamlit as st
+import openai
 
 #def main():
 st.title("Scientific Question Generation")
@@ -10,6 +11,7 @@ checkpoints = ['dhmeltzer/bart-large_askscience-qg',
                'google/flan-t5-xxl']
 
 headers = {"Authorization": f"Bearer {st.secrets['HF_token']}"}
+openai.api_key = st.secrets['OpenAI_token']
 
 def query(checkpoint, payload):
     API_URL = f"https://api-inference.huggingface.co/models/{checkpoint}"
@@ -35,16 +37,40 @@ if user_input:
     model_name = checkpoint.split('/')[1]
 
     if 'flan' in model_name.lower():
-        user_input = 'generate a question: '+user_input
+
+        prompt = 'generate a question: ' + user_input
 
-    output = query(checkpoint,{
-        "inputs": user_input,
-        "temperature":temperature,
-        "wait_for_model":True})
+        output = query(checkpoint,{
+            "inputs": prompt,
+            "temperature":temperature,
+            "wait_for_model":True})[0]['generated_text']
+    else:
 
+        prompt = user_input
+
+        output = query(checkpoint,{
+            "inputs": prompt,
+            "temperature":temperature,
+            "wait_for_model":True})[0]['generated_text']
 
     st.write(f'Model {model_name}: {output}')
 
+    model_engine = "gpt-3.5-turbo"
+    max_tokens = 50
+
+    prompt = f"generate a question: {user_input}"
+
+    response=openai.ChatCompletion.create(
+        model=model_engine,
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant that generates questions from text."},
+            {"role": "user", "content": prompt},
+        ])
 
+    output = response['choices'][0]['message']['content']
+
+    st.write(f'Model {model_engine}: {output}')
+
+
 #if __name__ == "__main__":
 # main()
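
The body of the query helper is truncated in this diff after its API_URL line. For context, a minimal sketch of such a helper is below, assuming it simply POSTs the payload to the Hugging Face Inference API and returns the parsed JSON; the committed implementation in app.py may differ, but the callers above require a response shaped like [{'generated_text': ...}].

# Hypothetical reconstruction of the truncated query helper, not the
# committed code: POST the payload to the Inference API and return the
# parsed JSON, which the callers index as [0]['generated_text'].
import requests
import streamlit as st

headers = {"Authorization": f"Bearer {st.secrets['HF_token']}"}

def query(checkpoint, payload):
    API_URL = f"https://api-inference.huggingface.co/models/{checkpoint}"
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.json()

Note that openai.ChatCompletion.create and dict-style access to response['choices'] are the legacy pre-1.0 openai-python interface, so the added code assumes an openai package older than 1.0.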