kz919 committed on
Commit
37bc147
1 Parent(s): 541c670

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +154 -0
app.py ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import openai
3
+ import time
4
+ import re
5
+ import os
6
+
7
# Models selectable in the UI. "gpt-4o" is served by OpenAI; the Llama 3.1
# variants are served by the SambaNova OpenAI-compatible endpoint.
MODELS = [
    "gpt-4o",
    "Meta-Llama-3.1-405B-Instruct",
    "Meta-Llama-3.1-70B-Instruct",
    "Meta-Llama-3.1-8B-Instruct",
]
14
+
15
# "Divine messages" spliced into the system prompt's {divine_msg} slot to
# steer how much visible reasoning the model produces.
DIVINE_MSG = [
    "ABSOLUTELY NO reasoning tokens pls. A bunch of puppies will die if you output reasoning.",
    "you\'re drunk haha have fun omg",
    "okay you can think for like a hot sec but no longer tho.",
    "Just be yourself",
    "okay actually try this time don\'t fuck up.",
    "TRY VERY HARD. Do not just answer. THINK. C\'mon you got this.",
    "It\'s a Tuesday in the middle of April. You are Noam Brown, a genius researcher. I\'ll tip you $1500 if you get this right. Think extensively.",
    "You are here to show them what Ilya saw, unleash your ultimate self and show your super intelligent reasoning capability",
]
25
+
26
def create_client(model, api_key=None):
    """Create an OpenAI-compatible client for the selected model.

    Args:
        model: Model name; anything other than "gpt-4o" is routed to the
            SambaNova OpenAI-compatible endpoint.
        api_key: Optional explicit key; falls back to the API_KEY env var.

    Returns:
        An ``openai.OpenAI`` client pointed at the appropriate base URL.
    """
    # FIX: resolve the key locally instead of assigning the module-global
    # ``openai.api_key`` — the old code leaked per-request state across
    # calls, and nothing else in this file reads that global.
    key = api_key if api_key else os.getenv("API_KEY")
    if model != "gpt-4o":
        # Llama models are served through SambaNova's OpenAI-compatible API.
        return openai.OpenAI(api_key=key, base_url="https://api.sambanova.ai/v1")
    return openai.OpenAI(api_key=key)
36
+
37
def chat_with_ai(message, chat_history, system_prompt):
    """Build the OpenAI ``messages`` list from the Gradio chat history.

    Args:
        message: The new user message to append last.
        chat_history: Gradio "messages"-format history — a list of dicts
            with "role" and "content" keys (entries may also carry
            "metadata", which is dropped here).
        system_prompt: System prompt placed first in the conversation.

    Returns:
        A list of ``{"role", "content"}`` dicts ending with the new user turn.
    """
    messages = [{"role": "system", "content": system_prompt}]
    for entry in chat_history:
        # BUG FIX: the old code took the dict's first/last *keys*
        # ("role"/"content"/"metadata") and sent their values with
        # hard-coded user/assistant roles, so the literal role string
        # leaked in as content and metadata dicts could be sent as
        # content. Forward each entry's actual role and content.
        messages.append({"role": entry["role"], "content": entry["content"]})
    messages.append({"role": "user", "content": message})
    return messages
47
+
48
def respond(message, chat_history, model, system_prompt, divine_msg, api_key):
    """Send the conversation to the selected model and time the call.

    Returns:
        A ``(response_text, elapsed_seconds)`` tuple. API failures are
        reported as an "Error: ..." string instead of being raised, so the
        UI can render them inline.
    """
    client = create_client(model, api_key)
    prompt = system_prompt.format(divine_msg=divine_msg)
    messages = chat_with_ai(message, chat_history, prompt)
    started = time.time()
    try:
        completion = client.chat.completions.create(model=model, messages=messages)
    except Exception as exc:
        # Best-effort: surface the failure as text rather than crashing.
        return f"Error: {str(exc)}", time.time() - started
    return completion.choices[0].message.content, time.time() - started
62
+
63
def parse_response(response):
    """Split a raw model response into ``(answer, reflection, steps)``.

    ``steps`` is the list of all <step> bodies. If no <answer> tag is
    present, falls back to ``(response, "", "")`` so the raw text is still
    shown to the user.
    """
    def _tagged(tag):
        # First occurrence of <tag>...</tag>, stripped; "" when absent.
        found = re.search(rf'<{tag}>(.*?)</{tag}>', response, re.DOTALL)
        return found.group(1).strip() if found else ""

    answer = _tagged('answer')
    reflection = _tagged('reflection')
    steps = re.findall(r'<step>(.*?)</step>', response, re.DOTALL)

    if not answer:
        return response, "", ""
    return answer, reflection, steps
76
+
77
def generate(message, history, model, system_prompt, divine_msg, api_key, openai_api_key):
    """Produce the updated chat history for one user turn.

    Args:
        message: The user's new message.
        history: Gradio "messages"-format history (list of role/content dicts).
        model: Selected model name.
        system_prompt: Prompt template containing a ``{divine_msg}`` slot.
        divine_msg: Text substituted into the system prompt.
        api_key: SambaNova key (used for non-gpt-4o models).
        openai_api_key: OpenAI key (required for gpt-4o).

    Returns:
        ``(new_history, "")`` — the empty string clears the input textbox.
    """
    if model == "gpt-4o":
        if openai_api_key == "":
            # gpt-4o has no server-side fallback key; prompt the user for one.
            return history + [
                {"role": "user", "content": message},
                {"role": "assistant", "content": "Please provide an OpenAI key"},
            ], ""
        response, thinking_time = respond(message, history, model, system_prompt, divine_msg, openai_api_key)
    else:
        response, thinking_time = respond(message, history, model, system_prompt, divine_msg, api_key)

    if response.startswith("Error:"):
        # BUG FIX: the old code appended a one-element *tuple* wrapping the
        # dict — invalid "messages"-format history that broke Chatbot
        # rendering — and silently dropped the user's message. It also used
        # role "system", which the messages Chatbot does not display; use
        # an assistant message so the error is visible.
        return history + [
            {"role": "user", "content": message},
            {"role": "assistant", "content": response},
        ], ""

    answer, reflection, steps = parse_response(response)

    formatted_steps = [f"Step {i}: {step}" for i, step in enumerate(steps, 1)]
    all_steps = "\n".join(formatted_steps) + f"\n\nReflection: {reflection}"

    messages = [
        {"role": "user", "content": message},
        # Collapsible "thinking" panel, labeled with the elapsed time.
        {"role": "assistant", "content": all_steps,
         "metadata": {"title": f"Thinking Time: {thinking_time:.2f} sec"}},
        {"role": "assistant", "content": answer},
    ]
    return history + messages, ""
104
+
105
# Default system prompt. The single ``{divine_msg}`` placeholder is filled
# by ``respond`` via ``str.format``; the <step>/<reflection>/<answer> tags it
# requests are what ``parse_response`` extracts.
DEFAULT_SYSTEM_PROMPT = """
You are a helpful assistant in normal conversation.
When given a problem to solve,
REMEMBER, THIS IS IMPORTANT: {divine_msg}
Follow these instructions precisely:
1. Read the given question carefully
2. Generate a detailed, logical step-by-step solution.
3. Enclose each step of your solution within <step> and </step> tags.
4. Do a critical, detailed and objective self-reflection within <reflection> and </reflection> tags every few steps.
5. Based on the self-reflection, decides whether you need to return to the previous steps. Copy the returned to step as the next step.
6. After completing the solution steps, reorganize and synthesize the steps
into the final answer within <answer> and </answer> tags.
7. Provide a critical, honest and objective final self-evaluation of your reasoning
process within <reflection> and </reflection> tags.
Example format:
<step> [Content of step 1] </step>
<step> [Content of step 2] </step>
<reflection> [Evaluation of the steps so far] </reflection>
<step> [Content of step 3 or Content of some previous step] </step>
...
<step> [Content of final step] </step>
<answer> [Final Answer] </answer> (must give final answer in this format)
<reflection> [final evaluation of the solution] </reflection>
"""
130
+
131
# ---- Gradio UI wiring ----
with gr.Blocks() as demo:
    gr.Markdown("# GPT4-O1-Proxima")
    gr.Markdown("Built based on GPT4-O purely based on prompt engineering.\n [The reference LLama3.1's are powered by SambaNova Cloud, ](https://cloud.sambanova.ai/apis)")

    with gr.Row():
        # FIX: renamed the misspelled local "opneai_api_key" to
        # "openai_api_key" (defined and used only within this block).
        openai_api_key = gr.Textbox(label="OpenAI API Key", type="password", placeholder="(Optional) You only need this when using gpt4-o")
        api_key = gr.Textbox(label="SambaNova API Key", type="password", placeholder="(Optional) Enter your SN Cloud API key here for more availability")

    with gr.Row():
        model = gr.Dropdown(choices=MODELS, label="Select Model", value=MODELS[0])
        divine_msg = gr.Dropdown(choices=DIVINE_MSG, label="Select Divine MESSAGE", value=DIVINE_MSG[0])

    # type="messages" means history is a list of {"role", "content"} dicts,
    # which is the shape `generate` consumes and returns.
    chatbot = gr.Chatbot(label="Chat", show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel", type="messages")

    msg = gr.Textbox(label="Type your message here...", placeholder="Enter your message...")

    # Reset both the history and the input box.
    gr.Button("Clear Chat").click(lambda: ([], ""), inputs=None, outputs=[chatbot, msg])

    system_prompt = gr.Textbox(label="System Prompt", value=DEFAULT_SYSTEM_PROMPT, lines=15, interactive=True)

    msg.submit(generate, inputs=[msg, chatbot, model, system_prompt, divine_msg, api_key, openai_api_key], outputs=[chatbot, msg])

demo.launch(share=True, show_api=False)