abdullahmeda committed
Commit f0c73b9
Parent: 0780d8d

minor changes

Files changed (1):
  app.py  +24 −23
app.py CHANGED
@@ -4,35 +4,36 @@ from langchain.chat_models import ChatOpenAI
 from langchain.chains import ConversationChain
 from langchain.memory import ConversationBufferMemory
 
-
-def respond(openai_api_key, message, buffer_memory, chat_history):
+def respond(openai_api_key, openai_model, message, buffer_memory, chat_history):
     conversation = ConversationChain(
-        llm = ChatOpenAI(temperature=1.0, model='gpt-3.5-turbo'),
-        memory = buffer_memory,
-        openai_api_key = openai_api_key
+        llm = ChatOpenAI(
+            temperature=0.0,
+            model=openai_model,
+            openai_api_key=openai_api_key
+        ),
+        memory = buffer_memory
     )
     response = conversation.predict(input=message)
     chat_history.append([message, response])
     return "", buffer_memory, chat_history
 
 
-with gr.Blocks() as demo:
+with gr.Blocks(css="#component-0 { max-width: 900px; margin: auto; padding-top: 1.5rem; }") as demo:
     # with gr.Column():
-    with gr.Group(visible=True) as primary_settings:
-        with gr.Row():
-            openai_key = gr.Textbox(
-                label="OpenAI Key",
-                type="password",
-                placeholder="sk-a83jv6fn3x8ndm78b5W..."
-            )
-            model = gr.Dropdown(
-                ["gpt-4", "gpt-4-32k",
-                 "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-instruct",
-                 "text-davinci-002", "text-davinci-003"],
-                label="OpenAI Model",
-                value="gpt-3.5-turbo",
-                interactive=True
-            )
+    with gr.Row(visible=True) as primary_settings:
+        openai_key = gr.Textbox(
+            label="OpenAI Key",
+            type="password",
+            placeholder="sk-a83jv6fn3x8ndm78b5W..."
+        )
+        model = gr.Dropdown(
+            ["gpt-4", "gpt-4-32k",
+             "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-instruct",
+             "text-davinci-002", "text-davinci-003"],
+            label="OpenAI Model",
+            value="gpt-3.5-turbo",
+            interactive=True
+        )
     # with gr.Accordion("Advances Settings"):
     #     gr.Dropdown(
     #         [-1, 1, 5, 10, 25], label="Conversation Buffer (k)"
@@ -53,8 +54,8 @@ with gr.Blocks() as demo:
         min_width=0)
 
     # Event Handling
-    query.submit(respond, [openai_key, query, memory, chatbot], [query, memory, chatbot])
-    submit.click(respond, [openai_key, query, memory, chatbot], [query, memory, chatbot])
+    query.submit(respond, [openai_key, model, query, memory, chatbot], [query, memory, chatbot])
+    submit.click(respond, [openai_key, model, query, memory, chatbot], [query, memory, chatbot])
 
 if __name__ == "__main__":
     demo.launch()
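
For reference, a minimal sketch of how the updated respond() can be exercised outside the Gradio UI, assuming a valid OpenAI key; the key string, message, and memory setup below are placeholders and are not part of this commit.

from langchain.memory import ConversationBufferMemory

memory = ConversationBufferMemory()   # the state object Gradio threads through inputs/outputs
history = []                          # chatbot history starts empty

# respond() now takes the model name as its second argument, matching the
# new [openai_key, model, query, memory, chatbot] input list in the handlers.
_, memory, history = respond(
    openai_api_key="sk-...",          # placeholder key
    openai_model="gpt-3.5-turbo",
    message="Hello!",
    buffer_memory=memory,
    chat_history=history,
)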