Added history and Better UI (#7)

- Update requirements.txt (8186757df9cd4e8c124f9e8d8544bef8c2e323e1)
- Update app.py (11dcd687af7926659ec40b082561a7c9462889f6)

Co-authored-by: Nishith Jain <[email protected]>

Files changed:
- app.py: +64 −123
- requirements.txt: +1 −1
app.py
CHANGED
Before:

```diff
@@ -11,127 +11,113 @@ MODELS = [
     "Meta-Llama-3.1-8B-Instruct"
 ]
 
 def create_client(api_key=None):
-
     if api_key:
         openai.api_key = api_key
     else:
         openai.api_key = os.getenv("API_KEY")
 
-
-    messages = [
-        {"role": "system", "content": system_prompt},
-    ]
-
-    for human, ai in chat_history:
-        messages.append({"role": "user", "content": human})
-        messages.append({"role": "assistant", "content": ai})
 
     messages.append({"role": "user", "content": message})
-
     return messages
 
 def respond(message, chat_history, model, system_prompt, thinking_budget, api_key):
-
-    create_client(api_key)
     messages = chat_with_ai(message, chat_history, system_prompt.format(budget=thinking_budget))
     start_time = time.time()
 
     try:
-
-
-            model=model,
-            messages=messages,
-            stream=False  # Set to False for synchronous response
-        )
-        response = completion.choices[0].message['content']
         thinking_time = time.time() - start_time
-        print("Response received from OpenAI API.")
         return response, thinking_time
     except Exception as e:
         error_message = f"Error: {str(e)}"
-        print(error_message)
         return error_message, time.time() - start_time
 
 def parse_response(response):
     answer_match = re.search(r'<answer>(.*?)</answer>', response, re.DOTALL)
     reflection_match = re.search(r'<reflection>(.*?)</reflection>', response, re.DOTALL)
 
     answer = answer_match.group(1).strip() if answer_match else ""
     reflection = reflection_match.group(1).strip() if reflection_match else ""
-
     steps = re.findall(r'<step>(.*?)</step>', response, re.DOTALL)
-    if answer is not "":
-        return answer, reflection, steps
-    else:
-        return response, "", ""
 
-
-    print(f"Received message: {message}")
-    #if not api_key:
-    #    print("API key missing")
-    #    return history + [("System", "Please provide your API Key before starting the chat.")]
 
-
-
-
-        error_msg = f"System prompt missing placeholder: {str(e)}"
-        print(error_msg)
-        return history + [("System", error_msg)]
-
-    response, thinking_time = respond(message, history, model, formatted_system_prompt, thinking_budget, api_key)
 
     if response.startswith("Error:"):
-        return history + [("
 
     answer, reflection, steps = parse_response(response)
 
-
-
-
 
-
 
-
-    return history + [(message, formatted_response)]
 
 # Define the default system prompt
-
 You are a helpful assistant in normal conversation.
-When given a problem to solve, you are an expert problem-solving assistant.
-
 1. Read the given question carefully and reset counter between <count> and </count> to {budget}
 2. Generate a detailed, logical step-by-step solution.
 3. Enclose each step of your solution within <step> and </step> tags.
-4. You are allowed to use at most {budget} steps (starting budget),
-
-
-
-
-
 Example format:
 <count> [starting budget] </count>
-
 <step> [Content of step 1] </step>
 <count> [remaining budget] </count>
-
 <step> [Content of step 2] </step>
 <reflection> [Evaluation of the steps so far] </reflection>
 <reward> [Float between 0.0 and 1.0] </reward>
 <count> [remaining budget] </count>
-
 <step> [Content of step 3 or Content of some previous step] </step>
 <count> [remaining budget] </count>
-
 ...
-
 <step> [Content of final step] </step>
 <count> [remaining budget] </count>
-
-<answer> [Final Answer] </answer>
-
 <reflection> [Evaluation of the solution] </reflection>
-
 <reward> [Float between 0.0 and 1.0] </reward>
 """
```
```diff
@@ -140,65 +126,20 @@ with gr.Blocks() as demo:
     gr.Markdown("[Powered by Llama3.1 models through SambaNova Cloud API](https://sambanova.ai/fast-api?api_ref=907266)")
 
     with gr.Row():
-        api_key = gr.Textbox(
-            label="API Key",
-            type="password",
-            placeholder="(Optional) Enter your API key here for more availability"
-        )
 
     with gr.Row():
-        model = gr.Dropdown(
-
-            label="Select Model",
-            value=MODELS[0]
-        )
-        thinking_budget = gr.Slider(
-            minimum=1,
-            maximum=100,
-            value=10,
-            step=1,
-            label="Thinking Budget"
-        )
-
-    system_prompt = gr.Textbox(
-        label="System Prompt",
-        value=default_system_prompt,
-        lines=15,
-        interactive=True
-    )
 
-
-
-
-
-
-
-
-
-    chatbot =
-
-
-
-    # Initialize chat history as a Gradio state
-    chat_history = gr.State([])
-
-    def handle_submit(message, history, model, system_prompt, thinking_budget, api_key):
-        updated_history = process_chat(message, history, model, system_prompt, thinking_budget, api_key)
-        return updated_history, ""
-
-    def handle_clear():
-        return [], ""
-
-    submit.click(
-        handle_submit,
-        inputs=[msg, chat_history, model, system_prompt, thinking_budget, api_key],
-        outputs=[chatbot, msg]
-    )
-
-    clear.click(
-        handle_clear,
-        inputs=None,
-        outputs=[chatbot, msg]
-    )
-
-demo.launch()
```
After:

```diff
@@ -11,127 +11,113 @@ MODELS = [
     "Meta-Llama-3.1-8B-Instruct"
 ]
 
+# Sambanova API base URL
+API_BASE = "https://api.sambanova.ai/v1"
+
 def create_client(api_key=None):
+    """Creates an OpenAI client instance."""
     if api_key:
         openai.api_key = api_key
     else:
         openai.api_key = os.getenv("API_KEY")
 
+    return openai.OpenAI(api_key=openai.api_key, base_url=API_BASE)
```
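For orientation (not part of the commit): the new `create_client` returns an `openai.OpenAI` client pointed at SambaNova's OpenAI-compatible endpoint, which is what makes the v1-style `chat.completions` calls below possible. A minimal sketch, assuming a valid key in the `API_KEY` environment variable; the prompt text is illustrative:

```python
import os
import openai

# Build a v1-style client against the OpenAI-compatible SambaNova endpoint.
client = openai.OpenAI(
    api_key=os.getenv("API_KEY"),
    base_url="https://api.sambanova.ai/v1",
)

# One synchronous chat call, mirroring how respond() uses the client below.
completion = client.chat.completions.create(
    model="Meta-Llama-3.1-8B-Instruct",
    messages=[{"role": "user", "content": "Say hello in one word."}],
)
print(completion.choices[0].message.content)
```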
```diff
 
+def chat_with_ai(message, chat_history, system_prompt):
+    """Formats the chat history for the API call."""
+    messages = [{"role": "system", "content": system_prompt}]
+    print(type(chat_history))
+    for tup in chat_history:
+        print(type(tup))
+        first_key = list(tup.keys())[0]  # First key
+        last_key = list(tup.keys())[-1]  # Last key
+        messages.append({"role": "user", "content": tup[first_key]})
+        messages.append({"role": "assistant", "content": tup[last_key]})
     messages.append({"role": "user", "content": message})
     return messages
```
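Note that the loop reads each history entry by its first and last dict keys rather than by fixed names, so the result depends entirely on key order. A sketch of what the function builds, assuming the hypothetical entry shape that the first-key/last-key lookup implies (user turn under the first key, assistant turn under the last); the real shape comes from the Chatbot state at runtime:

```python
# Hypothetical history-entry shape; invented here for illustration.
history = [{"user": "What is 2 + 2?", "assistant": "4"}]

messages = [{"role": "system", "content": "You are a helpful assistant."}]
for tup in history:
    first_key = list(tup.keys())[0]   # user turn, by assumption
    last_key = list(tup.keys())[-1]   # assistant turn, by assumption
    messages.append({"role": "user", "content": tup[first_key]})
    messages.append({"role": "assistant", "content": tup[last_key]})
messages.append({"role": "user", "content": "And 3 + 3?"})

# -> system, user, assistant, user: the alternating list the API expects.
print(messages)
```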
```diff
 
 def respond(message, chat_history, model, system_prompt, thinking_budget, api_key):
+    """Sends the message to the API and gets the response."""
+    client = create_client(api_key)
     messages = chat_with_ai(message, chat_history, system_prompt.format(budget=thinking_budget))
     start_time = time.time()
 
     try:
+        completion = client.chat.completions.create(model=model, messages=messages)
+        response = completion.choices[0].message.content
         thinking_time = time.time() - start_time
         return response, thinking_time
     except Exception as e:
         error_message = f"Error: {str(e)}"
         return error_message, time.time() - start_time
```
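The substantive change in this hunk is the SDK migration: the removed code read `completion.choices[0].message['content']` (pre-1.0 dict access), while the added code calls the method on a client object and reads `.content` as an attribute. A comment-only comparison; the legacy call shape is inferred from the removed dict access, not quoted from the old file:

```python
# openai < 1.0 (legacy style, removed; call shape inferred):
#     completion = openai.ChatCompletion.create(model=model, messages=messages)
#     response = completion.choices[0].message["content"]

# openai >= 1.0 (client style, added by this commit):
#     client = create_client(api_key)
#     completion = client.chat.completions.create(model=model, messages=messages)
#     response = completion.choices[0].message.content
```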
```diff
 
 def parse_response(response):
+    """Parses the response from the API."""
     answer_match = re.search(r'<answer>(.*?)</answer>', response, re.DOTALL)
     reflection_match = re.search(r'<reflection>(.*?)</reflection>', response, re.DOTALL)
 
     answer = answer_match.group(1).strip() if answer_match else ""
     reflection = reflection_match.group(1).strip() if reflection_match else ""
     steps = re.findall(r'<step>(.*?)</step>', response, re.DOTALL)
 
+    return answer, reflection, steps
```
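A self-contained check of the tag extraction, using the same regexes as `parse_response`; the sample response text is invented for illustration:

```python
import re

sample = """<count> 2 </count>
<step> Add 2 and 2. </step>
<count> 1 </count>
<answer> 4 </answer>
<reflection> Simple arithmetic; high confidence. </reflection>
<reward> 1.0 </reward>"""

answer_match = re.search(r'<answer>(.*?)</answer>', sample, re.DOTALL)
steps = re.findall(r'<step>(.*?)</step>', sample, re.DOTALL)

print(answer_match.group(1).strip())   # -> 4
print([s.strip() for s in steps])      # -> ['Add 2 and 2.']
```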
```diff
 
+def generate(message, history, model, system_prompt, thinking_budget, api_key):
+    """Generates the chatbot response."""
+    response, thinking_time = respond(message, history, model, system_prompt, thinking_budget, api_key)
 
     if response.startswith("Error:"):
+        return history + [({"role": "system", "content": response},)], ""
 
     answer, reflection, steps = parse_response(response)
 
+    messages = []
+    messages.append({"role": "user", "content": message})
+
+    formatted_steps = [f"Step {i}: {step}" for i, step in enumerate(steps, 1)]
+    all_steps = "\n".join(formatted_steps) + f"\n\nReflection: {reflection}"
 
+    messages.append({"role": "assistant", "content": all_steps, "metadata": {"title": f"Thinking Time: {thinking_time:.2f} sec"}})
+    messages.append({"role": "assistant", "content": answer})
 
+    return history + messages, ""
```
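`generate` now returns history in the Chatbot's "messages" format: plain role/content dicts, with `metadata={"title": ...}` on the step trace, which `gr.Chatbot(type="messages")` renders as a collapsible section. A sketch of the entries appended per turn, with illustrative values:

```python
# Shape of the history entries generate() appends (values invented).
history_delta = [
    {"role": "user", "content": "What is 2 + 2?"},
    {
        "role": "assistant",
        "content": "Step 1: Add 2 and 2.\n\nReflection: Trivial.",
        # Rendered collapsible by gr.Chatbot(type="messages"); the title
        # carries the measured thinking time.
        "metadata": {"title": "Thinking Time: 0.42 sec"},
    },
    {"role": "assistant", "content": "4"},  # the parsed <answer> text
]
```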
```diff
 
 # Define the default system prompt
+DEFAULT_SYSTEM_PROMPT = """
 You are a helpful assistant in normal conversation.
+When given a problem to solve, you are an expert problem-solving assistant.
+Your task is to provide a detailed, step-by-step solution to a given question.
+Follow these instructions carefully:
 1. Read the given question carefully and reset counter between <count> and </count> to {budget}
 2. Generate a detailed, logical step-by-step solution.
 3. Enclose each step of your solution within <step> and </step> tags.
+4. You are allowed to use at most {budget} steps (starting budget),
+   keep track of it by counting down within tags <count> </count>,
+   STOP GENERATING MORE STEPS when hitting 0, you don't have to use all of them.
+5. Do a self-reflection when you are unsure about how to proceed,
+   based on the self-reflection and reward, decides whether you need to return
+   to the previous steps.
+6. After completing the solution steps, reorganize and synthesize the steps
+   into the final answer within <answer> and </answer> tags.
+7. Provide a critical, honest and subjective self-evaluation of your reasoning
+   process within <reflection> and </reflection> tags.
+8. Assign a quality score to your solution as a float between 0.0 (lowest
+   quality) and 1.0 (highest quality), enclosed in <reward> and </reward> tags.
 Example format:
 <count> [starting budget] </count>
 <step> [Content of step 1] </step>
 <count> [remaining budget] </count>
 <step> [Content of step 2] </step>
 <reflection> [Evaluation of the steps so far] </reflection>
 <reward> [Float between 0.0 and 1.0] </reward>
 <count> [remaining budget] </count>
 <step> [Content of step 3 or Content of some previous step] </step>
 <count> [remaining budget] </count>
 ...
 <step> [Content of final step] </step>
 <count> [remaining budget] </count>
+<answer> [Final Answer] </answer> (must give final answer in this format)
 <reflection> [Evaluation of the solution] </reflection>
 <reward> [Float between 0.0 and 1.0] </reward>
 """
```
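`respond` fills the `{budget}` placeholder with `str.format` before each call, which is why the prompt keeps single braces around `budget` and uses no other brace literals. A one-line illustration (template trimmed for brevity):

```python
template = "Reset the counter between <count> and </count> to {budget}."
print(template.format(budget=10))
# -> Reset the counter between <count> and </count> to 10.
```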
```diff
@@ -140,65 +126,20 @@ with gr.Blocks() as demo:
     gr.Markdown("[Powered by Llama3.1 models through SambaNova Cloud API](https://sambanova.ai/fast-api?api_ref=907266)")
 
     with gr.Row():
+        api_key = gr.Textbox(label="API Key", type="password", placeholder="(Optional) Enter your API key here for more availability")
 
     with gr.Row():
+        model = gr.Dropdown(choices=MODELS, label="Select Model", value=MODELS[0])
+        thinking_budget = gr.Slider(minimum=1, maximum=100, value=10, step=1, label="Thinking Budget", info="maximum times a model can think")
 
+    chatbot = gr.Chatbot(label="Chat", show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel", type="messages")
+
+    msg = gr.Textbox(label="Type your message here...", placeholder="Enter your message...")
+
+    gr.Button("Clear Chat").click(lambda: ([], ""), inputs=None, outputs=[chatbot, msg])
+
+    system_prompt = gr.Textbox(label="System Prompt", value=DEFAULT_SYSTEM_PROMPT, lines=15, interactive=True)
+
+    msg.submit(generate, inputs=[msg, chatbot, model, system_prompt, thinking_budget, api_key], outputs=[chatbot, msg])
+
+demo.launch(share=True, show_api=False)
```
requirements.txt
CHANGED
```diff
@@ -1 +1 @@
-openai==
+openai==1.45.1
```
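The pin matters because `create_client` relies on the v1-style `openai.OpenAI` client, which only exists in openai >= 1.0. A quick sanity check, assuming it runs in the Space's environment:

```python
import openai

# The v1 client class used by create_client(); absent before openai 1.0.
assert hasattr(openai, "OpenAI")
print(openai.__version__)  # expected: 1.45.1, per requirements.txt
```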