import requests
import streamlit as st
import openai

def main():
    st.title("Scientific Question Generation")
    
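    # Model checkpoints queried through the Hugging Face Inference API.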
    checkpoints = ['dhmeltzer/bart-large_askscience-qg',
                   'dhmeltzer/flan-t5-base_askscience-qg',
                   'google/flan-t5-xxl']
    
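    # API credentials are read from Streamlit secrets.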
    headers = {"Authorization": f"Bearer {st.secrets['HF_token']}"}
    openai.api_key = st.secrets['OpenAI_token']
    
    def query(checkpoint, payload):
        """Send `payload` to the Hugging Face Inference API for `checkpoint` and return the JSON response."""
        API_URL = f"https://api-inference.huggingface.co/models/{checkpoint}"

        response = requests.post(API_URL,
                                 headers=headers,
                                 json=payload)

        return response.json()
    
    # Text area for the passage the models will turn into questions.
    user_input = st.text_area("Question Generator",
                              """Black holes are the most gravitationally dense objects in the universe.""")
    
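    # Once the user provides a passage, generate a question from each checkpoint.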
    if user_input:
        for checkpoint in checkpoints:
            
            model_name = checkpoint.split('/')[1]
    
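            # FLAN-T5 checkpoints are prompted with an instruction prefix; the BART model receives the raw passage.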
            if 'flan' in model_name.lower():
                
                prompt = 'generate a question: ' + user_input

            else:
                prompt = user_input
            
            output = query(checkpoint,
                           {"inputs": prompt,
                            "options": {"wait_for_model": True}})
            try:
                output = output[0]['generated_text']
            except (KeyError, IndexError, TypeError):
                # The API returned an error payload (e.g. the model is still loading).
                st.write(output)
                return
            
            st.write(f'Model {model_name}: {output}')
    
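        # Also query OpenAI's gpt-3.5-turbo on the same passage for comparison.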
        model_engine = "gpt-3.5-turbo"
        max_tokens = 50
        
        prompt = f"generate a question: {user_input}"
    
        response = openai.ChatCompletion.create(
            model=model_engine,
            max_tokens=max_tokens,
            messages=[
                {"role": "system", "content": "You are a helpful assistant that generates questions from text."},
                {"role": "user", "content": prompt},
            ])
    
        output = response['choices'][0]['message']['content']
        
        st.write(f'Model {model_engine}: {output}')

        
if __name__ == "__main__":
    main()