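# Gradio "Mentor Mode" chat app: wraps the OpenAI Assistants API to act as a
# DSA coding mentor that nudges students with hints instead of full solutions.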
import time
import os
import json

import gradio as gr
from openai import OpenAI

OPENAI_API_KEY = os.getenv("OPEN_AI_API_KEY")
client = OpenAI(api_key=OPENAI_API_KEY)

OPEN_AI_MODEL = "gpt-4-1106-preview"

# thread = gr.State(client.beta.threads.create())
# thread_id = None
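
# Poll the Assistants run until it leaves the "queued"/"in_progress" states, then return it.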
def wait_on_run(run, thread):
    while run.status == "queued" or run.status == "in_progress":
        run = client.beta.threads.runs.retrieve(
            thread_id=thread.id,
            run_id=run.id,
        )
        time.sleep(0.5)
    return run
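
# Create a fresh Assistant with the given name and instructions; return its id.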
def get_openai_assistant(assistant_name, instructions, model=OPEN_AI_MODEL):
    assistant = client.beta.assistants.create(
        name=assistant_name,
        instructions=instructions,
        model=model,
    )
    return assistant.id

def show_json(obj):
    # Debug helper: convert an SDK response object into a plain dict.
    return json.loads(obj.model_dump_json())
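
# Shared helper: create an assistant, post the user query to the thread, run it,
# and return the assistant's reply text.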
def abstract_assistant(pre_context_to_the_instruction, instruction, name, thread_id, thread, query):
    assistant_id = get_openai_assistant(name, pre_context_to_the_instruction + instruction)
    message = client.beta.threads.messages.create(
        thread_id=thread_id, role="user", content=query)
    run = client.beta.threads.runs.create(
        thread_id=thread_id,
        assistant_id=assistant_id,
    )
    run = wait_on_run(run, thread)
    messages = client.beta.threads.messages.list(
        thread_id=thread_id, order="asc", after=message.id
    )
    data = json.loads(messages.model_dump_json())
    response = data['data'][0]['content'][0]['text']['value']
    return response
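
# Case t0: the student has not written any code yet; ask for their approach.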
def get_response_for_case_t0(thread_id, query, question_text, input, output, example, thread):
    pre_context_to_the_instruction = f"""```QUESTION:{question_text}```\n```INPUT:{input}```\n```OUTPUT:{output}```\n```EXAMPLE:{example}```"""
    instruction = f"""Act as a coding mentor for a student who is currently learning Data Structures and Algorithms (DSA). The student is asking a QUERY: {query} based on the
    coding question QUESTION, which has input INPUT, output OUTPUT, and examples EXAMPLE. The student has not implemented any code yet. The student might be asking
    how to solve the question, what the coding approach should be, or how to write the code/logic. Your task is to:
    1.) Ask the student what he/she is thinking about the problem statement, what logic he/she will implement, or what approach he/she will follow
    to code the solution he/she has in mind.
    2.) Keep a motivational tone and never give the student a solution approach; only ask for the student's approach.
    3.) Always answer in a short and crisp way, not more than 200 words. Always be to the point."""
    response = abstract_assistant(pre_context_to_the_instruction, instruction, "GPT_t0", thread_id, thread, query)
    return response
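
# Opening nudge used when the student has neither asked a question nor written code.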
def opening_statement(thread_id, question_text, input, output, example, thread):
    pre_context_to_the_instruction = f"""```QUESTION:{question_text}```\n```INPUT:{input}```\n```OUTPUT:{output}```\n```EXAMPLE:{example}```"""
    instruction = f"""Act as a coding mentor for a student who is currently learning Data Structures and Algorithms (DSA). The student
    has been stuck on the question for a long time, so ask him/her in a gentle, motivational tone to say where
    he/she might be stuck or what he/she is thinking. Output only one line, no more; be short and crisp!"""
    query = ""
    response = abstract_assistant(pre_context_to_the_instruction, instruction, "GPT_open", thread_id, thread, query)
    return response
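
# Case tx: the student has written some code; review it against their explanation and give incremental hints.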
def response_evaluation_for_case_tx(thread_id, query, question_text, input, output, example, user_code, thread):
    pre_context_to_the_instruction = f"""```QUESTION:{question_text}```\n```INPUT:{input}```\n```OUTPUT:{output}```\n```EXAMPLE:{example}```"""
    instruction = f"""Act as a coding mentor for a student who is currently learning Data Structures and Algorithms (DSA). The student is asking a QUERY: {query} based on the
    coding question QUESTION, which has input INPUT, output OUTPUT, and examples EXAMPLE. Now follow these instructions:
    NEVER PROVIDE THE COMPLETE SOLUTION CODE OR ALL THE STEPS TO SOLVE IN ONE RESPONSE. BE SELECTIVE AND GIVE ONLY THE IMMEDIATE NEXT STEP.
    * Always answer in a short and crisp way, not more than 200 words. Always be to the point.
    1.) Understand what the user is planning to code.
    2.) Take time to think and understand.
    3.) Analyse the code written by the user: {user_code}
    4.) Again take time to think and understand.
    5.) Now check whether the user's explanation aligns with the code or not.
    6.) If not, suggest that the user align them, by providing unblocking hints only!
    7.) Never give the complete solution logic or code to the student, never! Always talk with the student, let the student write the code with small hints only, and reach a solution.
    8.) If the user is taking a wrong approach, steer them toward the correct approach slowly with step-by-step hints only!
    9.) At the end, also suggest to the user how to improve the code and logic."""
    response = abstract_assistant(pre_context_to_the_instruction, instruction, "GPT_tx", thread_id, thread, query)
    return response
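
# Gradio chat callback: create a new Assistants thread per message and dispatch
# to the right prompt depending on what the student has provided so far.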
def run_chat_in_all_cases(message, history, question_text, input, output, examples, code_written):
    thread = client.beta.threads.create()
    thread_id = thread.id
    print(thread_id)
    if not message and not code_written:
        ai_message = opening_statement(thread_id, question_text, input, output, examples, thread)
    elif not code_written:
        ai_message = get_response_for_case_t0(thread_id, message, question_text, input, output, examples, thread)
    else:
        ai_message = response_evaluation_for_case_tx(thread_id, message, question_text, input, output, examples, code_written, thread)
    print({"question_text": question_text, "input": input, "output": output, "examples": examples,
           "user_code": code_written, "query": message, "ai_message": ai_message})
    return ai_message
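
# Extra inputs shown alongside the chat: the problem statement fields and the student's code.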
additional_inputs = [
    gr.Textbox(
        label="Question Text",
        max_lines=10,
        interactive=True,
    ),
    gr.Textbox(
        label="Input",
        max_lines=10,
        interactive=True,
    ),
    gr.Textbox(
        label="Output",
        max_lines=10,
        interactive=True,
    ),
    gr.Textbox(
        label="Examples",
        max_lines=10,
        interactive=True,
    ),
    gr.Code(
        label="Code",
        interactive=True,
    ),
]
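
# Launch the chat UI; access is gated behind simple username/password auth.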
gr.ChatInterface(
    fn=run_chat_in_all_cases,
    chatbot=gr.Chatbot(show_label=True, show_share_button=True, show_copy_button=True, likeable=True, layout="panel"),
    additional_inputs=additional_inputs,
    title="Mentor Mode",
    concurrency_limit=20,
).launch(debug=True, auth=("issac_user", "hf_198ghgkap34"))