import autogen

def run_multi_agent(llm, task):
    # Shared LLM configuration used by every agent in the group chat.
    llm_config = {"model": llm}
    
    # Admin: the user proxy that issues the task and relays feedback to the Writer.
    user_proxy = autogen.ConversableAgent(
        name="Admin",
        system_message="Give the task, and send "
        "instructions to the writer to refine the blog post.",
        code_execution_config=False,
        llm_config=llm_config,
        human_input_mode="NEVER",
    )
    
    # Planner: breaks the task into steps that can be completed with Python code,
    # then tracks progress and directs the remaining steps.
    planner = autogen.ConversableAgent(
        name="Planner",
        system_message="Given a task, please determine "
        "what information is needed to complete the task. "
        "Please note that the information will all be retrieved using"
        " Python code. Please only suggest information that can be "
        "retrieved using Python code. "
        "After each step is done by others, check the progress and "
        "instruct the remaining steps. If a step fails, try to "
        "work around it.",
        description="Planner. Given a task, determine what "
        "information is needed to complete the task. "
        "After each step is done by others, check the progress and "
        "instruct the remaining steps.",
        llm_config=llm_config,
    )
    
    # Engineer: an AssistantAgent (default coding system message) that writes
    # the code required by the Planner's plan.
    engineer = autogen.AssistantAgent(
        name="Engineer",
        llm_config=llm_config,
        description="An engineer that writes code based on the plan "
        "provided by the planner.",
    )
    
    # Executor: runs the Engineer's code locally (no Docker) and reports the
    # result; it only executes code, so no llm_config is set.
    executor = autogen.ConversableAgent(
        name="Executor",
        system_message="Execute the code written by the "
        "engineer and report the result.",
        human_input_mode="NEVER",
        code_execution_config={
            "last_n_messages": 3,
            "work_dir": "coding",
            "use_docker": False,
        },
    )
    
    # Writer: turns the execution results into a markdown blog post and revises
    # it based on the Admin's feedback.
    writer = autogen.ConversableAgent(
        name="Writer",
        llm_config=llm_config,
        system_message="Writer."
        "Please write blogs in markdown format (with relevant titles)"
        " and put the content in pseudo ```md``` code block. "
        "You take feedback from the admin and refine your blog.",
        description="Writer."
        "Write blogs based on the code execution results and take "
        "feedback from the admin to refine the blog."
    )
    
    # Group chat: the manager selects which of these agents speaks each round,
    # for at most 25 rounds.
    groupchat = autogen.GroupChat(
        agents=[user_proxy, engineer, writer, executor, planner],
        messages=[],
        max_round=25,
    )
    
    # The manager uses the LLM to pick the next speaker and route messages.
    manager = autogen.GroupChatManager(
        groupchat=groupchat, llm_config=llm_config
    )
    
    # The Admin kicks off the group chat with the task; the chat result is
    # returned to the caller.
    groupchat_result = user_proxy.initiate_chat(
        manager,
        message=task,
    )
    
    return groupchat_result
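

# Minimal usage sketch. The model name and task below are illustrative
# assumptions (not part of the original file); running this also requires a
# valid OpenAI API key in the environment.
if __name__ == "__main__":
    result = run_multi_agent(
        "gpt-3.5-turbo",
        "Write a blogpost about the stock price performance of Nvidia "
        "in the past month.",
    )
    print(result)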