import json
import logging
import multiprocessing
import os

import gradio as gr
from pkg_resources import resource_filename

from swiftsage.agents import SwiftSage
from swiftsage.utils.commons import PromptTemplate, api_configs, setup_logging

ENGINE = "Together"
SWIFT_MODEL_ID = "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo"
FEEDBACK_MODEL_ID = "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo"
SAGE_MODEL_ID = "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo"

# ENGINE = "Groq"
# SWIFT_MODEL_ID = "llama-3.1-8b-instant"
# FEEDBACK_MODEL_ID = "llama-3.1-8b-instant"
# SAGE_MODEL_ID = "llama-3.1-70b-versatile"

# ENGINE = "SambaNova"
# SWIFT_MODEL_ID = "Meta-Llama-3.1-8B-Instruct"
# FEEDBACK_MODEL_ID = "Meta-Llama-3.1-70B-Instruct"
# SAGE_MODEL_ID = "Meta-Llama-3.1-405B-Instruct"


def solve_problem(problem, max_iterations, reward_threshold,
                  swift_model_id, sage_model_id, feedback_model_id,
                  use_retrieval, start_with_sage,
                  swift_temperature, swift_top_p,
                  sage_temperature, sage_top_p,
                  feedback_temperature, feedback_top_p):
    global ENGINE

    # Gradio may pass numbers as floats or strings; normalize them.
    max_iterations = int(max_iterations)
    reward_threshold = int(reward_threshold)

    # Configuration for each LLM
    swift_config = {
        "model_id": swift_model_id,
        "api_config": api_configs[ENGINE],
        "temperature": float(swift_temperature),
        "top_p": float(swift_top_p),
        "max_tokens": 2048,
    }

    feedback_config = {
        "model_id": feedback_model_id,
        "api_config": api_configs[ENGINE],
        "temperature": float(feedback_temperature),
        "top_p": float(feedback_top_p),
        "max_tokens": 2048,
    }

    sage_config = {
        "model_id": sage_model_id,
        "api_config": api_configs[ENGINE],
        "temperature": float(sage_temperature),
        "top_p": float(sage_top_p),
        "max_tokens": 2048,
    }

    # Specify the path to the prompt templates.
    # prompt_template_dir = './swiftsage/prompt_templates'
    # prompt_template_dir = resource_filename('swiftsage', 'prompt_templates')
    # Try multiple locations for the prompt templates.
    possible_paths = [
        resource_filename('swiftsage', 'prompt_templates'),
        os.path.join(os.path.dirname(__file__), '..', 'swiftsage', 'prompt_templates'),
        os.path.join(os.path.dirname(__file__), 'swiftsage', 'prompt_templates'),
        '/app/swiftsage/prompt_templates',  # for Docker environments
    ]

    prompt_template_dir = None
    for path in possible_paths:
        if os.path.exists(path):
            prompt_template_dir = path
            break

    dataset = []
    embeddings = []
    # TODO: dataset and embeddings are for retrieval augmentation (not yet implemented)

    s2 = SwiftSage(
        dataset,
        embeddings,
        prompt_template_dir,
        swift_config,
        sage_config,
        feedback_config,
        use_retrieval=use_retrieval,
        start_with_sage=start_with_sage,
    )

    reasoning, solution, messages = s2.solve(problem, max_iterations, reward_threshold)
    reasoning = reasoning.replace("The generated code is:", "\n---\nThe generated code is:").strip()
    solution = solution.replace("Answer (from running the code):\n ", " ").strip()

    # Render the log messages as an HTML code block that wraps, scrolls, and is
    # capped in height (the exact CSS values here are illustrative).
    log_messages = (
        "<pre style='white-space: pre-wrap; overflow-y: auto; max-height: 500px;'>"
        + "\n".join(messages)
        + "</pre>"
    )

    return reasoning, solution, log_messages

with gr.Blocks(theme=gr.themes.Soft()) as demo:
# gr.Markdown("## SwiftSage: A Multi-Agent Framework for Reasoning")
# use the html and center the title
gr.HTML("No log messages yet.
") solve_button.click( solve_problem, inputs=[problem, max_iterations, reward_threshold, swift_model_id, sage_model_id, feedback_model_id, use_retrieval, start_with_sage, temperature_swift, top_p_swift, temperature_sage, top_p_sage, temperature_feedback, top_p_feedback], outputs=[reasoning_output, solution_output, log_output], ) if __name__ == '__main__': # make logs dir if it does not exist if not os.path.exists('logs'): os.makedirs('logs') multiprocessing.set_start_method('spawn') demo.launch(share=False, show_api=False)