qg_generation / app.py
dhmeltzer's picture
Update app.py
df404d2
raw
history blame
2.15 kB
import numpy as np
import requests
import streamlit as st
import openai
#def main():
# --- Page setup -------------------------------------------------------------
# Streamlit re-executes this file top-to-bottom on every user interaction, so
# the statements below are deliberate module-level side effects.
st.title("Scientific Question Generation")
# Model checkpoints queried through the Hugging Face Inference API: two
# fine-tuned question-generation models plus a general instruction model.
checkpoints = ['dhmeltzer/bart-large_askscience-qg',
'dhmeltzer/flan-t5-base_askscience-qg',
'google/flan-t5-xxl']
# Credentials come from Streamlit secrets (.streamlit/secrets.toml locally,
# or the hosted Space's secret store) — never hard-coded in the source.
headers = {"Authorization": f"Bearer {st.secrets['HF_token']}"}
openai.api_key = st.secrets['OpenAI_token']
def query(checkpoint, payload, timeout=60):
    """POST a request to the Hugging Face Inference API for one checkpoint.

    Args:
        checkpoint: model repo id, e.g. ``'google/flan-t5-xxl'``.
        payload: JSON-serializable request body for the Inference API.
        timeout: seconds before the HTTP request is aborted. New parameter
            with a default, so existing callers are unaffected; previously
            a stalled endpoint could hang the Streamlit app indefinitely.

    Returns:
        The decoded JSON response. API error bodies (e.g. while a model is
        loading) are returned as-is for the caller to display.
    """
    API_URL = f"https://api-inference.huggingface.co/models/{checkpoint}"
    # `headers` (Bearer token) is defined at module level above.
    response = requests.post(API_URL,
                             headers=headers,
                             json=payload,
                             timeout=timeout)
    return response.json()
# User search
# Text area seeded with a default passage; its current contents are returned
# on every Streamlit rerun.
user_input = st.text_area("Question Generator",
"""Black holes are the most gravitationally dense objects in the universe.""")
# Filters
st.sidebar.markdown("**Filters**")
# Sampling temperature shared by all models: min 0.0, max 1.0,
# default 0.0, step 0.1.
temperature = st.sidebar.slider("Temperature", 0.0, 1.0, 0.0,.1)
if user_input:
    # --- Hugging Face hosted checkpoints ---------------------------------
    for checkpoint in checkpoints:
        model_name = checkpoint.split('/')[1]

        # FLAN checkpoints are instruction-tuned and expect an explicit task
        # prefix; the BART checkpoint was fine-tuned on raw passages.
        if 'flan' in model_name.lower():
            prompt = 'generate a question: ' + user_input
        else:
            prompt = user_input

        # Fix: the Inference API reads generation settings from the
        # "parameters" object and service flags from "options"; the original
        # payload placed "temperature" and "wait_for_model" at the top level,
        # where the API silently ignores them. Also deduplicates the two
        # identical query() calls from the if/else branches.
        output = query(checkpoint, {
            "inputs": prompt,
            "parameters": {"temperature": temperature},
            "options": {"wait_for_model": True}})

        st.write(f'Model {model_name}: {output}')

    # --- OpenAI chat model -----------------------------------------------
    model_engine = "gpt-3.5-turbo"
    max_tokens = 50  # cap on generated question length

    # Same task prefix as the FLAN models so every checkpoint sees the
    # same instruction.
    prompt = f"generate a question: {user_input}"

    response = openai.ChatCompletion.create(
        model=model_engine,
        messages=[
            {"role": "system", "content": "You are a helpful assistant that generates questions from text."},
            {"role": "user", "content": prompt},
        ],
        # Fix: honor the sidebar temperature and the max_tokens cap —
        # previously max_tokens was assigned but never passed to the API,
        # and the slider had no effect on the OpenAI model.
        temperature=temperature,
        max_tokens=max_tokens,
    )
    output = response['choices'][0]['message']['content']
    st.write(f'Model {model_engine}: {output}')
#if __name__ == "__main__":
# main()
#[0]['generated_text']