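"""GPT4-O1-Proxima: an o1-style reasoning demo built purely with prompt engineering.

The app prompts GPT-4o or Llama 3.1 models to emit <step>, <reflection>, and
<answer> tags, parses the tagged response, and renders the reasoning trace in a
Gradio chatbot.
"""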
import os
import re
import time

import gradio as gr
import openai

# Available models
MODELS = [
    "gpt-4o",
    "Meta-Llama-3.1-405B-Instruct",
    "Meta-Llama-3.1-70B-Instruct",
    "Meta-Llama-3.1-8B-Instruct"
]

# Playful "divine messages" injected into the system prompt to steer how much
# the model reasons before answering.
DIVINE_MSG = [
    "ABSOLUTELY NO reasoning tokens pls. A bunch of puppies will die if you output reasoning.",
    "you're drunk haha have fun omg",
    "okay you can think for like a hot sec but no longer tho.",
    "Just be yourself",
    "okay actually try this time don't fuck up.",
    "TRY VERY HARD. Do not just answer. THINK. C'mon you got this.",
    "It's a Tuesday in the middle of April. You are Noam Brown, a genius researcher. I'll tip you $1500 if you get this right. Think extensively.",
    "You are here to show them what Ilya saw, unleash your ultimate self and show your super intelligent reasoning capability"
]

def create_client(model, api_key=None):
    """Create an OpenAI-compatible client for the selected model."""
    key = api_key or os.getenv("API_KEY")
    if model != "gpt-4o":
        # Llama models are served through SambaNova's OpenAI-compatible endpoint.
        return openai.OpenAI(api_key=key, base_url="https://api.sambanova.ai/v1")
    return openai.OpenAI(api_key=key)

def chat_with_ai(message, chat_history, system_prompt):
    """Build the message list for the API call from the chat history."""
    messages = [{"role": "system", "content": system_prompt}]
    for past in chat_history:
        # With type="messages", each history entry is a {"role", "content", ...} dict.
        messages.append({"role": past["role"], "content": past["content"]})
    messages.append({"role": "user", "content": message})
    return messages
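
# Illustrative shape of the list chat_with_ai returns:
# [{"role": "system", "content": "<system prompt>"},
#  {"role": "user", "content": "<earlier user turn>"},
#  {"role": "assistant", "content": "<earlier assistant turn>"},
#  {"role": "user", "content": "<new message>"}]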

def respond(message, chat_history, model, system_prompt, divine_msg, api_key):
    """Sends the message to the API and gets the response."""
    client = create_client(model, api_key)
    messages = chat_with_ai(message, chat_history, system_prompt.format(divine_msg=divine_msg))
    start_time = time.time()

    try:
        completion = client.chat.completions.create(model=model, messages=messages)
        response = completion.choices[0].message.content
        thinking_time = time.time() - start_time
        return response, thinking_time
    except Exception as e:
        error_message = f"Error: {str(e)}"
        return error_message, time.time() - start_time

def parse_response(response):
    """Extract the answer, final reflection, and step list from a tagged response."""
    answer_match = re.search(r'<answer>(.*?)</answer>', response, re.DOTALL)
    reflection_match = re.search(r'<reflection>(.*?)</reflection>', response, re.DOTALL)

    answer = answer_match.group(1).strip() if answer_match else ""
    reflection = reflection_match.group(1).strip() if reflection_match else ""
    steps = re.findall(r'<step>(.*?)</step>', response, re.DOTALL)

    if answer == "":
        # No <answer> tag found: fall back to showing the raw response.
        return response, "", []

    return answer, reflection, steps

def generate(message, history, model, system_prompt, divine_msg, api_key, openai_api_key):
    """Generates the chatbot response."""
    if model == "gpt-4o":
        if openai_api_key == "":
            messages = []
            messages.append({"role": "user", "content": message})
            messages.append({"role": "assistant", "content": "Please provide an OpenAI key"})
            return history + messages, ""
        response, thinking_time = respond(message, history, model, system_prompt, divine_msg, openai_api_key)
    else:
        response, thinking_time = respond(message, history, model, system_prompt, divine_msg, api_key)

    if response.startswith("Error:"):
        return history + [({"role": "system", "content": response},)], ""

    answer, reflection, steps = parse_response(response)

    messages = []
    messages.append({"role": "user", "content": message})

    formatted_steps = [f"Step {i}: {step}" for i, step in enumerate(steps, 1)]
    all_steps = "\n".join(formatted_steps) + f"\n\nReflection: {reflection}"

    messages.append({"role": "assistant", "content": all_steps, "metadata": {"title": f"Thinking Time: {thinking_time:.2f} sec"}})
    messages.append({"role": "assistant", "content": answer})

    return history + messages, ""

# Define the default system prompt
DEFAULT_SYSTEM_PROMPT = """
You are a helpful assistant in normal conversation.
When given a problem to solve,
REMEMBER, THIS IS IMPORTANT: {divine_msg}
Follow these instructions precisely:
1. Read the given question carefully
2. Generate a detailed, logical step-by-step solution.
3. Enclose each step of your solution within <step> and </step> tags.
4. Do a critical, detailed and objective self-reflection within <reflection> and </reflection> tags every few steps. 
5. Based on the self-reflection, decide whether you need to return to a previous step. If so, copy the step you return to as the next step.
6. After completing the solution steps, reorganize and synthesize the steps 
   into the final answer within <answer> and </answer> tags.
7. Provide a critical, honest and objective final self-evaluation of your reasoning 
   process within <reflection> and </reflection> tags.
Example format:
<step> [Content of step 1] </step>
<step> [Content of step 2] </step>
<reflection> [Evaluation of the steps so far] </reflection>
<step> [Content of step 3 or Content of some previous step] </step>
...
<step> [Content of final step] </step>
<answer> [Final Answer] </answer> (must give final answer in this format)
<reflection> [final evaluation of the solution] </reflection>
"""

with gr.Blocks() as demo:
    gr.Markdown("# GPT4-O1-Proxima")
    gr.Markdown("Built based on GPT4-O purely based on prompt engineering.")
    gr.Markdown("The LLama3.1 references are powered by [SambaNova Cloud](https://cloud.sambanova.ai/apis)")
    
    with gr.Row():
        gr.Image("image.png", width = 300, height = 300)
        
    with gr.Row():
        openai_api_key = gr.Textbox(label="OpenAI API Key", type="password", placeholder="(Optional) Only needed when using gpt-4o")
        api_key = gr.Textbox(label="SambaNova API Key", type="password", placeholder="(Optional) Enter your SambaNova Cloud API key here for more availability")

    with gr.Row():
        model = gr.Dropdown(choices=MODELS, label="Select Model", value=MODELS[0])
        divine_msg = gr.Dropdown(choices=DIVINE_MSG, label="Select Divine Message", value=DIVINE_MSG[0])

    chatbot = gr.Chatbot(label="Chat", show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel", type="messages")

    msg = gr.Textbox(label="Type your message here...", placeholder="Enter your message...")

    gr.Button("Clear Chat").click(lambda: ([], ""), inputs=None, outputs=[chatbot, msg])

    system_prompt = gr.Textbox(label="System Prompt", value=DEFAULT_SYSTEM_PROMPT, lines=15, interactive=True)

    msg.submit(generate, inputs=[msg, chatbot, model, system_prompt, divine_msg, api_key, openai_api_key], outputs=[chatbot, msg])
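
# To run locally (assumptions: `gradio` and the `openai` SDK are installed, and
# API_KEY holds a SambaNova Cloud key for the Llama models):
#   python app.py   (filename assumed)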

demo.launch(share=True, show_api=False)