# Gradio chat demo that wires several Neon LLM backends (ChatGPT, PaLM 2,
# Gemini, Claude) behind a single chat interface with configurable personas.
import gradio as gr
from neon_llm_chatgpt.chatgpt import ChatGPT
from neon_llm_palm2.palm2 import Palm2
from neon_llm_gemini.gemini import Gemini
from neon_llm_claude.claude import Claude
from utils import convert_history, PersonaConverter
import os
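
# Required environment variables (read below): OPENAI_API_KEY, GOOGLE_API_KEY
# (expected to hold the full Vertex AI service-account JSON, which gets written
# to ./vertex.json) and ANTHROPIC_API_KEY.
#
# A minimal launch sketch, assuming this file is saved as app.py (the filename
# is not given in the source):
#
#   export OPENAI_API_KEY=... GOOGLE_API_KEY=... ANTHROPIC_API_KEY=...
#   python app.py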



# Default persona role and biography (left blank; editable from the UI)
role = ""
biography = ""


# ChatGPT backend, configured from the OpenAI API key in the environment
key = os.environ['OPENAI_API_KEY']
config = {
    "key": key,
    "model": "gpt-3.5-turbo",
    "role": role,
    "context_depth": 3,
    "max_tokens": 256,
}
chatgpt = ChatGPT(config)

# PaLM 2 backend (Vertex AI): the service-account JSON is passed via
# GOOGLE_API_KEY and written to a local key file
key_google = os.environ['GOOGLE_API_KEY']
key_path = "./vertex.json"

with open(key_path, "w") as json_file:
    json_file.write(key_google)

config = {
    "key_path": key_path,
    "role": role,
    "context_depth": 3,
    "max_tokens": 256,
}
palm2 = Palm2(config)

# Gemini backend, reusing the same Vertex AI key file
config = {
    "model": "gemini-pro",
    "key_path": key_path,
    "role": role,
    "context_depth": 3,
    "max_tokens": 1024,
}
gemini = Gemini(config)

# Claude backend; the wrapper is also given the OpenAI key alongside the Anthropic one
key_anthropic = os.environ['ANTHROPIC_API_KEY']
config = {
    "key": key_anthropic,
    "openai_key": key,
    "model": "claude-2",
    "role": role,
    "context_depth": 3,
    "max_tokens": 256,
}
claude = Claude(config)

# Map the model names shown in the UI to the instantiated backends
model_choices = {
    "openai": chatgpt,
    "palm2": palm2,
    "gemini": gemini,
    "anthropic": claude,
}
model_choices_list = list(model_choices.keys())


# Persona presets and historical figures exposed as dropdown choices
personaConverter = PersonaConverter()
preset_choices_list = list(personaConverter.personas.keys())
figures_choices_list = list(personaConverter.historical_figures.keys())

def ask(message, history, persona, model_name, preset, biography, figure, imaginary, short):
    """Compose a persona description and route the message to the selected backend."""
    chat_history = convert_history(history)
    model = model_choices[model_name]
    persona_description = personaConverter(name=preset, figure=figure,
                                           description=persona, biography=biography,
                                           imaginary=imaginary, short=short)
    response = model.ask(message, chat_history, persona={"description": persona_description})

    return response


# Chat UI: the persona controls are exposed as additional inputs alongside the chat box
demo = gr.ChatInterface(
    ask,
    additional_inputs=[
        gr.Textbox(role, label="Persona"),
        gr.Dropdown(choices=model_choices_list, value=model_choices_list[0], label="Model"),
        gr.Dropdown(choices=preset_choices_list, value=preset_choices_list[0], label="Preset"),
        gr.Textbox(biography, label="Biography"),
        gr.Dropdown(choices=figures_choices_list, value=figures_choices_list[0], label="Historical Figures"),
        gr.Checkbox(value=False, label="Imaginary"),
        gr.Checkbox(value=True, label="Short response"),
    ],
)

if __name__ == "__main__":
    demo.queue().launch()