import openai
import requests
import streamlit as st
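
# Streamlit demo: given a passage of scientific text, generate a question
# with several Hugging Face seq2seq checkpoints and with gpt-3.5-turbo.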

st.title("Scientific Question Generation")

checkpoints = ['dhmeltzer/bart-large_askscience-qg',
               'dhmeltzer/flan-t5-base_askscience-qg',
               'google/flan-t5-xxl']

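# Credentials are read from Streamlit secrets (HF_token and OpenAI_token).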
headers = {"Authorization": f"Bearer {st.secrets['HF_token']}"}
openai.api_key = st.secrets['OpenAI_token']

def query(checkpoint, payload):
    """POST a payload to the Hugging Face Inference API for the given checkpoint."""
    API_URL = f"https://api-inference.huggingface.co/models/{checkpoint}"

    response = requests.post(API_URL, headers=headers, json=payload)

    return response.json()

# User input: the passage to generate a question from.
user_input = st.text_area("Question Generator",
                          """Black holes are the most gravitationally dense objects in the universe.""")

# Filters
st.sidebar.markdown("**Filters**")

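# Temperature controls sampling randomness: 0 gives (near-)greedy decoding,
# while higher values produce more varied questions.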
temperature = st.sidebar.slider("Temperature", 0.0, 1.0, 0.0, 0.1)

if user_input:
    for checkpoint in checkpoints:

        model_name = checkpoint.split('/')[1]

        # The Flan-T5 models are prompted with an instruction prefix;
        # the BART model takes the raw passage as input.
        if 'flan' in model_name.lower():
            prompt = 'generate a question: ' + user_input
        else:
            prompt = user_input

        # The Inference API nests generation parameters under "parameters"
        # and request options such as "wait_for_model" under "options".
        output = query(checkpoint, {
            "inputs": prompt,
            "parameters": {"temperature": temperature},
            "options": {"wait_for_model": True}})[0]['generated_text']

        st.write(f'Model {model_name}: {output}')

    # Compare against OpenAI's chat model using the same instruction prompt.
    model_engine = "gpt-3.5-turbo"
    max_tokens = 50

    prompt = f"generate a question: {user_input}"

    # Cap the response length and mirror the sidebar temperature so all
    # models share the same sampling settings.
    response = openai.ChatCompletion.create(
        model=model_engine,
        max_tokens=max_tokens,
        temperature=temperature,
        messages=[
            {"role": "system", "content": "You are a helpful assistant that generates questions from text."},
            {"role": "user", "content": prompt},
        ])

    output = response['choices'][0]['message']['content']

    st.write(f'Model {model_engine}: {output}')
