File size: 5,330 Bytes
298d7d8
 
 
 
 
39dd26a
298d7d8
99ced92
298d7d8
 
 
7a509e3
298d7d8
 
7a509e3
298d7d8
 
 
 
 
 
 
 
 
 
 
f66a341
 
 
 
 
 
 
 
 
 
 
 
39dd26a
 
 
 
 
7a509e3
 
 
 
 
 
39dd26a
 
 
 
 
 
 
7a509e3
 
 
39dd26a
 
7a509e3
 
 
 
2f84176
7a509e3
 
 
 
 
 
 
 
 
2f84176
7a509e3
 
 
 
 
 
 
 
 
 
 
 
39dd26a
 
 
 
 
 
 
 
 
7a509e3
 
 
 
 
 
39dd26a
 
7a509e3
 
 
 
 
 
 
 
 
 
 
99ced92
39dd26a
 
 
99ced92
298d7d8
 
 
f66a341
7a509e3
298d7d8
99ced92
298d7d8
f66a341
7a509e3
298d7d8
7a509e3
f66a341
99ced92
298d7d8
 
 
 
99ced92
f66a341
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
import contextlib
import io
import re  # Import the regular expressions module
import sys
import traceback

import gradio as gr
from huggingface_hub import InferenceClient

# Initialize the AI model.
# A single module-level InferenceClient pointed at a hosted chat model on the
# Hugging Face Inference API; every LLM call in this app goes through it.
model_name = "Qwen/Qwen2.5-72B-Instruct"
client = InferenceClient(model_name)

def llm_inference(messages):
    """Send a chat-completion request to the hosted model and return its text.

    Args:
        messages: List of ``{"role": ..., "content": ...}`` dicts forming the
            conversation to send.

    Returns:
        The concatenated ``content`` of all returned choices (normally one,
        since no ``n`` parameter is requested).
    """
    eos_token = "<|endoftext|>"
    output = client.chat.completions.create(
        messages=messages,
        stream=False,
        temperature=0.7,
        top_p=0.1,
        max_tokens=412,
        stop=[eos_token],
    )
    # Use attribute access (choice.message.content): huggingface_hub returns
    # dataclass-like objects, and this is the documented, stable API, whereas
    # choice['message']['content'] relies on an internal dict-access shim.
    return "".join(choice.message.content for choice in output.choices)

def execute_code(code):
    """Execute a string of Python source and capture everything it prints.

    Args:
        code: Python source to run in a fresh, empty global namespace.

    Returns:
        The captured stdout on success, or an ``"Error: ..."`` string with
        the full traceback if execution raised.

    WARNING: ``exec`` on user-supplied text runs arbitrary code with the
    privileges of this process. That is this app's deliberate purpose (it is
    a code-runner UI), but do not expose it beyond a trusted/sandboxed
    environment.
    """
    buffer = io.StringIO()
    try:
        # redirect_stdout restores sys.stdout even if exec raises, replacing
        # the error-prone manual save/restore of the original version.
        with contextlib.redirect_stdout(buffer):
            exec(code, {})
        output = buffer.getvalue()
    except Exception as e:
        # Surface the failure to the user instead of crashing the UI.
        output = f"Error: {e}\n{traceback.format_exc()}"
    return output

def is_math_task(user_input):
    """Heuristically decide whether *user_input* looks like a math problem.

    Returns True when the text mentions any math-related keyword
    (case-insensitive) or contains any operator/function token anywhere in
    the raw string. Intentionally loose; can be replaced with a real
    classifier later.
    """
    keywords = (
        'calculate', 'compute', 'solve', 'integrate', 'differentiate',
        'derivative', 'integral', 'factorial', 'sum', 'product',
        'average', 'mean', 'median', 'mode', 'variance', 'standard deviation',
        'limit', 'matrix', 'determinant', 'equation', 'expression',
    )
    op_tokens = ('+', '-', '*', '/', '^', '**', 'sqrt', 'sin', 'cos', 'tan', 'log', 'exp')

    lowered = user_input.lower()
    # Keywords are matched case-insensitively; operator tokens against the
    # raw input, mirroring the original heuristic exactly.
    if any(kw in lowered for kw in keywords):
        return True
    return any(tok in user_input for tok in op_tokens)

def _strip_code_fences(generated_code):
    """Remove markdown code fences (```python ... ```) from model output.

    The single pattern matches every fence, with or without the ``python``
    tag and trailing newline, so one pass suffices.
    """
    return re.sub(r"```(?:python)?\n?", "", generated_code).strip()

def _solve_math_task(user_input):
    """Run the two-step math pipeline: explain, then generate and execute code.

    Step 1 asks the model for a short approach explanation (no answer).
    Step 2 feeds that explanation back to get runnable Python, which is
    fence-stripped and executed locally.

    Returns the formatted markdown response shown to the user.
    """
    # Step 1: Generate Explanation
    explanation_messages = [
        {
            "role": "system",
            "content": "Provide a very small explanation on how to approach the following mathematical task without calculating the answer."
        },
        {
            "role": "user",
            "content": f"Provide a short explanation on how to solve the following mathematical problem: {user_input}"
        },
    ]
    explanation = llm_inference(explanation_messages)

    # Step 2: Generate Python Code using Explanation and User Prompt
    code_prompt = f"Based on the following explanation, write a Python program to solve the mathematical task using Python. Ensure that the program includes a print statement to output the answer. Write only code. Writing any comments or anything else is prohibited. \n\nExplanation: {explanation}\n\nTask: {user_input}"

    code_messages = [
        {
            "role": "system",
            "content": "You are a Python developer. Write Python code based on the provided explanation and task."
        },
        {
            "role": "user",
            "content": f"{code_prompt}"
        },
    ]
    generated_code = llm_inference(code_messages)

    cleaned_code = _strip_code_fences(generated_code)
    execution_result = execute_code(cleaned_code)

    return (
        f"**Explanation:**\n{explanation}\n\n"
        f"**Generated Python Code:**\n```python\n{cleaned_code}\n```\n\n"
        f"**Execution Result:**\n```\n{execution_result}\n```"
    )

def chat(user_input, history):
    """Handle one chat turn and update the Gradio history.

    Math-looking inputs go through the explain/generate/execute pipeline;
    everything else is answered directly by the model.

    Args:
        user_input: The user's message text.
        history: Gradio chat history, a list of (user, assistant) tuples;
            mutated in place.

    Returns:
        ``(history, history)`` — the same list twice, matching the two
        Chatbot outputs wired up in the UI.
    """
    if is_math_task(user_input):
        assistant_response = _solve_math_task(user_input)
    else:
        # For regular chat messages, use the AI's response
        messages = [
            {
                "role": "system",
                "content": "You are a helpful assistant."
            },
            {
                "role": "user",
                "content": user_input
            },
        ]
        assistant_response = llm_inference(messages)

    # Append to chat history
    history.append((user_input, assistant_response))
    return history, history

# Build the three-tab Gradio UI. Construction order inside the Blocks
# context determines the rendered layout, so the wiring below is behavior.
with gr.Blocks() as demo:
    gr.Markdown("# 🐍 Python Helper Chatbot")
    with gr.Tab("Chat"):
        chatbot = gr.Chatbot()
        msg = gr.Textbox(placeholder="Type your message here...", label="Your Message")
        # chat() returns (history, history); both outputs target the same
        # Chatbot component, matching its two-output signature.
        msg.submit(chat, inputs=[msg, chatbot], outputs=[chatbot, chatbot])
    
    with gr.Tab("Interpreter"):
        gr.Markdown("### 🖥️ Test Your Code")
        code_input = gr.Code(language="python", label="Python Code Input")
        run_button = gr.Button("Run Code")
        code_output = gr.Textbox(label="Output", lines=10)
        # Runs arbitrary user code via execute_code — see its security warning.
        run_button.click(execute_code, inputs=code_input, outputs=code_output)
    
    with gr.Tab("Logs"):
        gr.Markdown("### 📜 Logs")
        # NOTE(review): nothing ever writes to this textbox — the Logs tab is
        # currently a placeholder; wire up a log source or remove it.
        log_output = gr.Textbox(label="Logs", lines=10, interactive=False)

# Launch the Gradio app
demo.launch()