thinkall committed
Commit e11ba6d
Parent: bbdc1ce

Add get human input

Files changed (4)
  1. .gitignore +174 -0
  2. app.py +14 -7
  3. autogen_utils.py +160 -7
  4. configs.py +5 -0
.gitignore ADDED
@@ -0,0 +1,174 @@
+.docusaurus/
+node_modules/
+# Project
+/.vs
+.vscode
+
+# Log files
+*.log
+
+# Python virtualenv
+.venv
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+logs
+
+.idea/*
+.DS_Store
+
+output/
+*.pkl
+
+# local config files
+*.config.local
+OAI_CONFIG_LIST
+key_openai.txt
+key_aoai.txt
+base_aoai.txt
+wolfram.txt
+
+# DB on disk for TeachableAgent
+tmp/
+
+.cache
+test*
+coding
+.chromadb
app.py CHANGED
@@ -1,24 +1,23 @@
+import asyncio
 import os
 import random
 import time
+from functools import partial
 
 import autogen
 import panel as pn
 from autogen_utils import (
     MathUserProxyAgent,
     RetrieveUserProxyAgent,
+    check_termination_and_human_reply,
     get_retrieve_config,
     initialize_agents,
 )
+from configs import Q1, Q2, Q3, TIMEOUT, TITLE
 from custom_widgets import RowAgentWidget
 from panel.chat import ChatInterface
-from panel.widgets import Button, PasswordInput, Switch, TextInput
+from panel.widgets import Button, PasswordInput, Switch, TextAreaInput, TextInput
 
-TIMEOUT = 60
-TITLE = "Microsoft AutoGen Playground"
-Q1 = "What's AutoGen?"
-Q2 = "Write a python function to compute the sum of numbers."
-Q3 = "find papers on LLM applications from arxiv in the last week, create a markdown table of different domains."
 pn.extension(design="material")
 
 template = pn.template.BootstrapTemplate(title=TITLE)
@@ -199,7 +198,6 @@ async def agents_chat(init_sender, manager, contents, agents):
 
 
 async def reply_chat(contents, user, instance):
-    # print([message for message in instance.objects])
     if hasattr(instance, "collection_name"):
         collection_name = instance.collection_name
     else:
@@ -224,6 +222,15 @@ async def reply_chat(contents, user, instance):
         if "UserProxy" in str(type(agent)):
            init_sender = agent
            break
+    for agent in agents:
+        # Hack for get human input
+        agent._reply_func_list.pop(1)
+        agent.register_reply(
+            [autogen.Agent, None],
+            partial(check_termination_and_human_reply, instance=instance),
+            1,
+        )
+
     if not init_sender:
         init_sender = agents[0]
     await agents_chat(init_sender, manager, contents, agents)
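
Note on the reply_chat change above: for every agent, the commit pops the built-in reply hook at index 1 of _reply_func_list and registers the new Panel-aware check_termination_and_human_reply coroutine in the same slot, so human feedback flows through the chat UI instead of stdin. The sketch below shows that pattern in isolation; it is illustrative only, the build_user_proxy helper is not part of the commit, and whether index 1 holds the stock termination/human-reply handler depends on the autogen version.

    from functools import partial

    import autogen
    from autogen_utils import check_termination_and_human_reply

    def build_user_proxy(chat_instance=None):
        """Build a user proxy whose human-input step goes through the Panel chat UI."""
        user_proxy = autogen.UserProxyAgent(
            name="User",
            human_input_mode="TERMINATE",
            code_execution_config=False,
        )
        # Mirror the app.py hack: drop the built-in hook currently at index 1
        # (assumed here to be the stock termination/human-reply handler) ...
        user_proxy._reply_func_list.pop(1)
        # ... and register the async replacement at the same position so the
        # ordering of the remaining reply functions is preserved.
        user_proxy.register_reply(
            [autogen.Agent, None],
            partial(check_termination_and_human_reply, instance=chat_instance),
            1,
        )
        return user_proxy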
autogen_utils.py CHANGED
@@ -1,10 +1,13 @@
+import asyncio
 import sys
 import threading
+import time
 from ast import literal_eval
 
 import autogen
 import chromadb
-from autogen import AssistantAgent, UserProxyAgent
+import panel as pn
+from autogen import Agent, AssistantAgent, UserProxyAgent
 from autogen.agentchat.contrib.compressible_agent import CompressibleAgent
 from autogen.agentchat.contrib.gpt_assistant_agent import GPTAssistantAgent
 from autogen.agentchat.contrib.llava_agent import LLaVAAgent
@@ -13,6 +16,15 @@ from autogen.agentchat.contrib.retrieve_assistant_agent import RetrieveAssistantAgent
 from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent
 from autogen.agentchat.contrib.teachable_agent import TeachableAgent
 from autogen.code_utils import extract_code
+from configs import Q1, Q2, Q3, TIMEOUT, TITLE
+from panel.widgets import TextAreaInput
+
+try:
+    from termcolor import colored
+except ImportError:
+
+    def colored(x, *args, **kwargs):
+        return x
 
 
 def get_retrieve_config(docs_path, model_name, collection_name):
@@ -29,7 +41,9 @@
 
 # autogen.ChatCompletion.start_logging()
 def termination_msg(x):
-    return isinstance(x, dict) and "TERMINATE" == str(x.get("content", ""))[-9:].upper()
+    """Check if a message is a termination message."""
+    _msg = str(x.get("content", "")).upper().strip().strip("\n").strip(".")
+    return isinstance(x, dict) and (_msg.endswith("TERMINATE") or _msg.startswith("TERMINATE"))
 
 
 def _is_termination_msg(message):
@@ -50,6 +64,37 @@
     return not contain_code
 
 
+def new_generate_oai_reply(
+    self,
+    messages=None,
+    sender=None,
+    config=None,
+):
+    """Generate a reply using autogen.oai."""
+    client = self.client if config is None else config
+    if client is None:
+        return False, None
+    if messages is None:
+        messages = self._oai_messages[sender]
+
+    # handle 336006 https://cloud.baidu.com/doc/WENXINWORKSHOP/s/tlmyncueh
+    _context = messages[-1].pop("context", None)
+    _messages = self._oai_system_message + messages
+    for idx, msg in enumerate(_messages):
+        if idx == 0:
+            continue
+        if idx % 2 == 1:
+            msg["role"] = "user" if msg.get("role") != "function" else "function"
+        else:
+            msg["role"] = "assistant"
+    if len(_messages) % 2 == 1:
+        _messages.append({"content": "Please reply exactly `TERMINATE` to me if the task is done.", "role": "user"})
+    # print(f"messages: {_messages}")
+    response = client.create(context=_context, messages=_messages)
+    # print(f"{response=}")
+    return True, client.extract_text_or_function_call(response)[0]
+
+
 def initialize_agents(
     llm_config, agent_name, system_msg, agent_type, retrieve_config=None, code_execution_config=False
 ):
@@ -57,11 +102,11 @@
         agent = RetrieveUserProxyAgent(
             name=agent_name,
             is_termination_msg=termination_msg,
-            human_input_mode="NEVER",
+            human_input_mode="TERMINATE",
             max_consecutive_auto_reply=5,
             retrieve_config=retrieve_config,
             code_execution_config=code_execution_config, # set to False if you don't want to execute the code
-            default_auto_reply="Reply `TERMINATE` if the task is done.",
+            default_auto_reply="Please reply exactly `TERMINATE` to me if the task is done.",
         )
     elif "GPTAssistantAgent" == agent_type:
         agent = GPTAssistantAgent(
@@ -88,9 +133,9 @@
         agent = globals()[agent_type](
            name=agent_name,
            is_termination_msg=termination_msg,
-           human_input_mode="NEVER",
+           human_input_mode="TERMINATE",
            system_message=system_msg,
-           default_auto_reply="Reply `TERMINATE` if the task is done.",
+           default_auto_reply="Please reply exactly `TERMINATE` to me if the task is done.",
            max_consecutive_auto_reply=5,
            code_execution_config=code_execution_config,
        )
@@ -102,6 +147,114 @@
             system_message=system_msg,
             llm_config=llm_config,
         )
-
+    # if any(["ernie" in cfg["model"].lower() for cfg in llm_config["config_list"]]):
+    if "ernie" in llm_config["config_list"][0]["model"].lower():
+        # Hack for ERNIE Bot models
+        # print("Hack for ERNIE Bot models.")
+        agent._reply_func_list.pop(-1)
+        agent.register_reply([Agent, None], new_generate_oai_reply, -1)
     return agent
 
+
+async def get_human_input(name, prompt: str, instance=None) -> str:
+    """Get human input."""
+    if instance is None:
+        return input(prompt)
+    get_input_widget = TextAreaInput(placeholder=prompt, name="", sizing_mode="stretch_width")
+    get_input_checkbox = pn.widgets.Checkbox(name="Check to Submit Feedback")
+    instance.send(pn.Row(get_input_widget, get_input_checkbox), user=name, respond=False)
+    ts = time.time()
+    while True:
+        if time.time() - ts > TIMEOUT:
+            instance.send(
+                f"You didn't provide your feedback in {TIMEOUT} seconds, skip and use auto-reply.",
+                user=name,
+                respond=False,
+            )
+            reply = ""
+            break
+        if get_input_widget.value != "" and get_input_checkbox.value is True:
+            get_input_widget.disabled = True
+            reply = get_input_widget.value
+            break
+        await asyncio.sleep(0.1)
+    return reply
+
+
+async def check_termination_and_human_reply(
+    self,
+    messages=None,
+    sender=None,
+    config=None,
+    instance=None,
+):
+    """Check if the conversation should be terminated, and if human reply is provided."""
+    if config is None:
+        config = self
+    if messages is None:
+        messages = self._oai_messages[sender]
+    message = messages[-1]
+    reply = ""
+    no_human_input_msg = ""
+    if self.human_input_mode == "ALWAYS":
+        reply = await get_human_input(
+            self.name,
+            f"Provide feedback to {sender.name}. Press enter to skip and use auto-reply, or type 'exit' to end the conversation: ",
+            instance,
+        )
+        no_human_input_msg = "NO HUMAN INPUT RECEIVED." if not reply else ""
+        # if the human input is empty, and the message is a termination message, then we will terminate the conversation
+        reply = reply if reply or not self._is_termination_msg(message) else "exit"
+    else:
+        if self._consecutive_auto_reply_counter[sender] >= self._max_consecutive_auto_reply_dict[sender]:
+            if self.human_input_mode == "NEVER":
+                reply = "exit"
+            else:
+                # self.human_input_mode == "TERMINATE":
+                terminate = self._is_termination_msg(message)
+                reply = await get_human_input(
+                    self.name,
+                    f"Please give feedback to {sender.name}. Press enter or type 'exit' to stop the conversation: "
+                    if terminate
+                    else f"Please give feedback to {sender.name}. Press enter to skip and use auto-reply, or type 'exit' to stop the conversation: ",
+                    instance,
+                )
+                no_human_input_msg = "NO HUMAN INPUT RECEIVED." if not reply else ""
+                # if the human input is empty, and the message is a termination message, then we will terminate the conversation
+                reply = reply if reply or not terminate else "exit"
+        elif self._is_termination_msg(message):
+            if self.human_input_mode == "NEVER":
+                reply = "exit"
+            else:
+                # self.human_input_mode == "TERMINATE":
+                reply = await get_human_input(
+                    self.name,
+                    f"Please give feedback to {sender.name}. Press enter or type 'exit' to stop the conversation: ",
+                    instance,
+                )
+                no_human_input_msg = "NO HUMAN INPUT RECEIVED." if not reply else ""
+                # if the human input is empty, and the message is a termination message, then we will terminate the conversation
+                reply = reply or "exit"
+
+    # print the no_human_input_msg
+    if no_human_input_msg:
+        print(colored(f"\n>>>>>>>> {no_human_input_msg}", "red"), flush=True)
+
+    # stop the conversation
+    if reply == "exit":
+        # reset the consecutive_auto_reply_counter
+        self._consecutive_auto_reply_counter[sender] = 0
+        return True, None
+
+    # send the human reply
+    if reply or self._max_consecutive_auto_reply_dict[sender] == 0:
+        # reset the consecutive_auto_reply_counter
+        self._consecutive_auto_reply_counter[sender] = 0
+        return True, reply
+
+    # increment the consecutive_auto_reply_counter
+    self._consecutive_auto_reply_counter[sender] += 1
+    if self.human_input_mode != "NEVER":
+        print(colored("\n>>>>>>>> USING AUTO REPLY...", "red"), flush=True)
+
+    return False, None
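
The new get_human_input above replaces blocking console input with a TextAreaInput plus a submit checkbox rendered inside the chat, polled every 0.1 s until the user submits feedback or TIMEOUT seconds pass, in which case it returns an empty string and the agent falls back to auto-reply. A rough usage sketch, assuming this repo's autogen_utils.py and panel are importable; the callback body and the "Boss" user name are illustrative, not part of the commit:

    import panel as pn
    from autogen_utils import get_human_input

    pn.extension(design="material")

    async def callback(contents, user, instance):
        # Ask the human for feedback through the chat UI; an empty string means
        # nothing was submitted before TIMEOUT seconds (auto-reply fallback).
        feedback = await get_human_input("Boss", f"Any feedback on: {contents!r}? ", instance)
        return feedback or "No feedback received, continuing with auto-reply."

    chat_interface = pn.chat.ChatInterface(callback=callback)
    chat_interface.servable()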
configs.py ADDED
@@ -0,0 +1,5 @@
+TIMEOUT = 60
+TITLE = "Microsoft AutoGen Playground"
+Q1 = "What's AutoGen?"
+Q2 = "Write a python function to compute the sum of numbers."
+Q3 = "find papers on LLM applications from arxiv in the last week, create a markdown table of different domains."