|
from autogen import ConversableAgent, AssistantAgent |
|
from autogen.coding import LocalCommandLineCodeExecutor |
|
|
|
|
|
|
|
|
|
def run_multi_agent(llm, message):
    """Run a writer/executor agent pair on *message* and return the chat result.

    Wires an LLM-backed code-writer agent to a non-LLM executor agent that
    runs generated code via a local command-line executor, then starts the
    conversation with the given task message.

    Parameters
    ----------
    llm : str
        Model name placed into the writer agent's ``llm_config``.
    message : str
        Initial task message that kicks off the two-agent chat.

    Returns
    -------
    ChatResult
        The object returned by ``initiate_chat`` on the executor agent.
    """
    writer_llm_config = {"model": llm}

    # Runs generated code on the local machine; each execution is capped
    # at 60 seconds and uses ./coding as its working directory.
    local_executor = LocalCommandLineCodeExecutor(
        timeout=60,
        work_dir="coding",
    )

    # Executor side: no LLM of its own, never prompts a human, and nudges
    # the writer with a fixed auto-reply when it has nothing to execute.
    executor_agent = ConversableAgent(
        name="code_executor_agent",
        llm_config=False,
        code_execution_config={"executor": local_executor},
        human_input_mode="NEVER",
        default_auto_reply="Please continue. If everything is done, reply 'TERMINATE'.",
    )

    # Writer side: LLM-driven assistant that proposes code but never runs it.
    writer_agent = AssistantAgent(
        name="code_writer_agent",
        llm_config=writer_llm_config,
        code_execution_config=False,
        human_input_mode="NEVER",
    )

    # Surface the writer's default system prompt for inspection, exactly as
    # the original does before starting the chat.
    print(writer_agent.system_message)

    return executor_agent.initiate_chat(
        writer_agent,
        message=message,
    )