DmitriiKhizbullin committed
Commit 1598f31
1 Parent(s): 204d6d8

Copied from the main repo

Files changed (3)
  1. app.py +12 -0
  2. apps/agents/agents.py +444 -0
  3. apps/agents/text_utils.py +31 -0
app.py ADDED
@@ -0,0 +1,12 @@
+ import os
+
+ from apps.agents.agents import construct_blocks, parse_arguments
+
+ if __name__ == "__main__":
+
+     api_key = os.environ["OPENAI_API_KEY"]
+
+     args = parse_arguments()
+     blocks = construct_blocks(api_key)
+     blocks.queue(args.concurrency_count)
+     blocks.launch()
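
Note: app.py (the Space entry point) reads the key from the OPENAI_API_KEY environment variable and uses Gradio's launch defaults. A minimal sketch of the equivalent programmatic launch with explicit queue/launch options, mirroring main() in apps/agents/agents.py (the port and concurrency values here are illustrative):

    import os

    from apps.agents.agents import construct_blocks

    # Assumes OPENAI_API_KEY is already set in the environment.
    blocks = construct_blocks(os.environ["OPENAI_API_KEY"])
    blocks.queue(1).launch(server_name="0.0.0.0", server_port=8080)
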
apps/agents/agents.py ADDED
@@ -0,0 +1,444 @@
+ """
+ Gradio-based web app Agents that uses OpenAI API to generate
+ a chat between collaborative agents.
+ """
+
+ import argparse
+ import os
+ import re
+ from dataclasses import dataclass
+ from typing import Dict, List, Optional, Tuple, Union
+
+ import gradio as gr
+ import openai
+ import openai.error
+ import tenacity
+
+ from apps.agents.text_utils import split_markdown_code
+ from camel.agents import RolePlaying, TaskSpecifyAgent
+ from camel.messages import AssistantChatMessage
+
+ REPO_ROOT = os.path.realpath(
+     os.path.join(os.path.dirname(os.path.abspath(__file__)), "../.."))
+
+ ChatBotHistory = List[Tuple[Optional[str], Optional[str]]]
+
+
+ @dataclass
+ class State:
+     session: Optional[RolePlaying]
+     max_messages: int
+     chat: ChatBotHistory
+     saved_assistant_msg: Optional[AssistantChatMessage]
+
+     @classmethod
+     def empty(cls) -> 'State':
+         return cls(None, 0, [], None)
+
+     @staticmethod
+     def construct_inplace(
+             state: 'State', session: Optional[RolePlaying], max_messages: int,
+             chat: ChatBotHistory,
+             saved_assistant_msg: Optional[AssistantChatMessage]) -> None:
+         state.session = session
+         state.max_messages = max_messages
+         state.chat = chat
+         state.saved_assistant_msg = saved_assistant_msg
+
+
+ def parse_arguments():
+     """ Get command line arguments. """
+
+     parser = argparse.ArgumentParser("Camel data explorer")
+     parser.add_argument('--api-key', type=str, default=None,
+                         help='OpenAI API key')
+     parser.add_argument('--share', type=bool, default=False,
+                         help='Expose the web UI to Gradio')
+     parser.add_argument('--server-port', type=int, default=8080,
+                         help='Port to run the web page on')
+     parser.add_argument('--inbrowser', type=bool, default=False,
+                         help='Open the web UI in the default browser on launch')
+     parser.add_argument(
+         '--concurrency-count', type=int, default=1,
+         help='Number of concurrent threads at Gradio websocket queue. ' +
+         'Increase to serve more requests but keep an eye on RAM usage.')
+     args, unknown = parser.parse_known_args()
+     if len(unknown) > 0:
+         print("Unknown args: ", unknown)
+     return args
+
+
+ def load_roles(path: str) -> List[str]:
+     """ Load roles from list files.
+
+     Args:
+         path (str): Path to the TXT file.
+
+     Returns:
+         List[str]: List of roles.
+     """
+
+     assert os.path.exists(path)
+     roles = []
+     with open(path, "r") as f:
+         lines = f.readlines()
+         for line in lines:
+             match = re.search(r"^\d+\.\s*(.+)\n*$", line)
+             if match:
+                 role = match.group(1)
+                 roles.append(role)
+             else:
+                 print("Warning: no match")
+     return roles
+
+
+ def cleanup_on_launch(state) -> Tuple[State, ChatBotHistory, Dict]:
+     """ Prepare the UI for a new session.
+
+     Args:
+         state (State): Role playing state.
+
+     Returns:
+         Tuple[State, ChatBotHistory, Dict]:
+             - Updated state.
+             - Chatbot window contents.
+             - Start button state (disabled).
+     """
+     # The line below breaks the every=N runner
+     # `state = State.empty()`
+
+     State.construct_inplace(state, None, 0, [], None)
+
+     return state, [], gr.update(interactive=False)
+
+
+ def role_playing_start(
+     state,
+     assistant: str,
+     user: str,
+     original_task: str,
+     max_messages: float,
+     with_task_specifier: bool,
+     word_limit: int,
+ ) -> Union[Dict, Tuple[State, str, Union[str, Dict], ChatBotHistory, Dict]]:
+     """ Creates a role playing session.
+
+     Args:
+         state (State): Role playing state.
+         assistant (str): Contents of the Assistant field.
+         user (str): Contents of the User field.
+         original_task (str): Original task field.
+         max_messages (float): Limit on the number of chat messages.
+         with_task_specifier (bool): Enable/Disable task specifier.
+         word_limit (int): Limit of words for task specifier.
+
+     Returns:
+         Union[Dict, Tuple[State, str, Union[str, Dict], ChatBotHistory, Dict]]:
+             - Updated state.
+             - Generated specified task.
+             - Planned task (if any).
+             - Chatbot window contents.
+             - Progress bar contents.
+     """
+
+     if state.session is not None:
+         print("Double click")
+         return {}  # may fail
+
+     try:
+         task_specify_kwargs = dict(word_limit=word_limit) \
+             if with_task_specifier else None
+
+         session = RolePlaying(assistant, user, original_task,
+                               with_task_specify=with_task_specifier,
+                               task_specify_agent_kwargs=task_specify_kwargs,
+                               with_task_planner=False)
+     except (openai.error.RateLimitError, tenacity.RetryError,
+             RuntimeError) as ex:
+         print("OpenAI API exception 0 " + str(ex))
+         return (state, str(ex), "", [], gr.update())
+
+     # Can't re-create a state like below since it
+     # breaks 'role_playing_chat_cont' runner with every=N.
+     # `state = State(session=session, max_messages=int(max_messages), chat=[],`
+     # `              saved_assistant_msg=None)`
+
+     State.construct_inplace(state, session, int(max_messages), [], None)
+
+     specified_task_prompt = session.specified_task_prompt \
+         if session.specified_task_prompt is not None else ""
+     planned_task_prompt = session.planned_task_prompt \
+         if session.planned_task_prompt is not None else ""
+
+     planned_task_upd = gr.update(
+         value=planned_task_prompt, visible=session.planned_task_prompt
+         is not None)
+
+     progress_update = gr.update(maximum=state.max_messages, value=1,
+                                 visible=True)
+
+     return (state, specified_task_prompt, planned_task_upd, state.chat,
+             progress_update)
+
+
+ def role_playing_chat_init(state) -> \
+         Union[Dict, Tuple[State, ChatBotHistory, Dict]]:
+     """ Initialize role playing.
+
+     Args:
+         state (State): Role playing state.
+
+     Returns:
+         Union[Dict, Tuple[State, ChatBotHistory, Dict]]:
+             - Updated state.
+             - Chatbot window contents.
+             - Progress bar contents.
+     """
+
+     if state.session is None:
+         print("Error: session is none on role_playing_chat_init call")
+         return state, state.chat, gr.update()
+
+     try:
+         assistant_msg, _ = state.session.init_chat()
+         assistant_msg: AssistantChatMessage
+     except (openai.error.RateLimitError, tenacity.RetryError,
+             RuntimeError) as ex:
+         print("OpenAI API exception 1 " + str(ex))
+         state.session = None
+         return state, state.chat, gr.update()
+
+     state.saved_assistant_msg = assistant_msg
+
+     progress_update = gr.update(maximum=state.max_messages, value=1,
+                                 visible=True)
+
+     return state, state.chat, progress_update
+
+
+ # WORKAROUND: do not add type hints for session and chatbot_history
+ def role_playing_chat_cont(state) -> \
+         Tuple[State, ChatBotHistory, Dict, Dict]:
+     """ Produce a pair of messages by an assistant and a user.
+         To be run multiple times.
+
+     Args:
+         state (State): Role playing state.
+
+     Returns:
+         Tuple[State, ChatBotHistory, Dict, Dict]:
+             - Updated state.
+             - Chatbot window contents.
+             - Progress bar contents.
+             - Start button state (to be eventually enabled).
+     """
+
+     if state.session is None:
+         return state, state.chat, gr.update(visible=False), gr.update()
+
+     if state.saved_assistant_msg is None:
+         return state, state.chat, gr.update(), gr.update()
+
+     try:
+         assistant_msgs, user_msgs = state.session.step(
+             state.saved_assistant_msg)
+     except (openai.error.RateLimitError, tenacity.RetryError,
+             RuntimeError) as ex:
+         print("OpenAI API exception 2 " + str(ex))
+         state.session = None
+         return state, state.chat, gr.update(), gr.update()
+
+     u_msg = user_msgs[0]
+     a_msg = assistant_msgs[0]
+
+     state.saved_assistant_msg = a_msg
+
+     state.chat.append((None, split_markdown_code(u_msg.content)))
+     state.chat.append((split_markdown_code(a_msg.content), None))
+
+     if len(state.chat) >= state.max_messages:
+         state.session = None
+
+     if "CAMEL_TASK_DONE" in a_msg.content or \
+             "CAMEL_TASK_DONE" in u_msg.content:
+         state.session = None
+
+     progress_update = gr.update(maximum=state.max_messages,
+                                 value=len(state.chat), visible=state.session
+                                 is not None)
+
+     start_bn_update = gr.update(interactive=state.session is None)
+
+     return state, state.chat, progress_update, start_bn_update
+
+
+ def stop_session(state) -> Tuple[State, Dict, Dict]:
+     """ Finish the session and leave chat contents as an artefact.
+
+     Args:
+         state (State): Role playing state.
+
+     Returns:
+         Tuple[State, Dict, Dict]:
+             - Updated state.
+             - Progress bar contents.
+             - Start button state (to be eventually enabled).
+     """
+
+     state.session = None
+     return state, gr.update(visible=False), gr.update(interactive=True)
+
+
+ def construct_ui(blocks, api_key: Optional[str] = None) -> None:
+     """ Build Gradio UI and populate with roles.
+
+     Args:
+         blocks (gr.Blocks): Blocks instance to attach the UI to.
+         api_key (Optional[str]): OpenAI API key.
+
+     Returns:
+         None
+     """
+
+     if api_key is not None:
+         openai.api_key = api_key
+
+     assistant_role_path = \
+         os.path.join(REPO_ROOT, "data/ai_society/assistant_roles.txt")
+     user_role_path = \
+         os.path.join(REPO_ROOT, "data/ai_society/user_roles.txt")
+
+     assistant_roles = load_roles(assistant_role_path)
+     user_roles = load_roles(user_role_path)
+
+     assistant_role = "Python Programmer"
+     user_role = "Stock Trader"
+
+     default_task = "Develop a trading bot for the stock market"
+
+     with gr.Row():
+         with gr.Column(scale=1):
+             assistant_dd = gr.Dropdown(assistant_roles,
+                                        label="Example assistant roles",
+                                        value=assistant_role, interactive=True)
+             assistant_ta = gr.TextArea(label="Assistant role (EDIT ME)",
+                                        lines=1, interactive=True)
+         with gr.Column(scale=1):
+             user_dd = gr.Dropdown(user_roles, label="Example user roles",
+                                   value=user_role, interactive=True)
+             user_ta = gr.TextArea(label="User role (EDIT ME)", lines=1,
+                                   interactive=True)
+         with gr.Column(scale=1):
+             gr.Markdown(
+                 "## CAMEL: Communicative Agents for \"Mind\" Exploration"
+                 " of Large Scale Language Model Society\n"
+                 "Github repo: [https://github.com/lightaime/camel]"
+                 "(https://github.com/lightaime/camel)"
+                 '<div style="display:flex; justify-content:center;">'
+                 '<img src="https://raw.githubusercontent.com/lightaime/camel/'
+                 'master/misc/logo.png" alt="Logo" style="max-width:50%;">'
+                 '</div>')
+     with gr.Row():
+         with gr.Column(scale=9):
+             original_task_ta = gr.TextArea(
+                 label="Give me a preliminary idea (EDIT ME)",
+                 value=default_task, lines=1, interactive=True)
+         with gr.Column(scale=1):
+             universal_task_bn = gr.Button("Insert universal task")
+     with gr.Row():
+         with gr.Column():
+             with gr.Row():
+                 task_specifier_cb = gr.Checkbox(value=True,
+                                                 label="With task specifier")
+             with gr.Row():
+                 ts_word_limit_nb = gr.Number(
+                     value=TaskSpecifyAgent.DEFAULT_WORD_LIMIT,
+                     label="Word limit for task specifier",
+                     visible=task_specifier_cb.value)
+         with gr.Column():
+             num_messages_sl = gr.Slider(minimum=1, maximum=50, step=1,
+                                         value=10, interactive=True,
+                                         label="Messages to generate")
+
+         with gr.Column(scale=2):
+             with gr.Row():
+                 start_bn = gr.Button("Make agents chat [takes time]",
+                                      elem_id="start_button")
+             with gr.Row():
+                 clear_bn = gr.Button("Interrupt the current query")
+     progress_sl = gr.Slider(minimum=0, maximum=100, value=0, step=1,
+                             label="Progress", interactive=False, visible=False)
+     specified_task_ta = gr.TextArea(
+         label="Specified task prompt given to the role-playing session"
+         " based on the original (simplistic) idea", lines=1, interactive=False)
+     task_prompt_ta = gr.TextArea(label="Planned task prompt", lines=1,
+                                  interactive=False, visible=False)
+     chatbot = gr.Chatbot(label="Chat between autonomous agents")
+     session_state = gr.State(State.empty())
+
+     universal_task_bn.click(lambda: "Help me to do my job", None,
+                             original_task_ta)
+
+     task_specifier_cb.change(lambda v: gr.update(visible=v), task_specifier_cb,
+                              ts_word_limit_nb)
+
+     start_bn.click(cleanup_on_launch, session_state,
+                    [session_state, chatbot, start_bn], queue=False) \
+         .then(role_playing_start,
+               [session_state, assistant_ta, user_ta,
+                original_task_ta, num_messages_sl,
+                task_specifier_cb, ts_word_limit_nb],
+               [session_state, specified_task_ta, task_prompt_ta,
+                chatbot, progress_sl],
+               queue=False) \
+         .then(role_playing_chat_init, session_state,
+               [session_state, chatbot, progress_sl], queue=False)
+
+     blocks.load(role_playing_chat_cont, session_state,
+                 [session_state, chatbot, progress_sl, start_bn], every=0.5)
+
+     clear_bn.click(stop_session, session_state,
+                    [session_state, progress_sl, start_bn])
+
+     assistant_dd.change(lambda dd: dd, assistant_dd, assistant_ta)
+     user_dd.change(lambda dd: dd, user_dd, user_ta)
+
+     blocks.load(lambda dd: dd, assistant_dd, assistant_ta)
+     blocks.load(lambda dd: dd, user_dd, user_ta)
+
+
+ def construct_blocks(api_key: Optional[str]):
+     """ Construct Agents app but do not launch it.
+
+     Args:
+         api_key (Optional[str]): OpenAI API key.
+
+     Returns:
+         gr.Blocks: Blocks instance.
+     """
+
+     css_str = "#start_button {border: 3px solid #4CAF50; font-size: 20px;}"
+
+     with gr.Blocks(css=css_str) as blocks:
+         construct_ui(blocks, api_key)
+
+     return blocks
+
+
+ def main():
+     """ Entry point. """
+
+     args = parse_arguments()
+
+     print("Getting Agents web server online...")
+
+     blocks = construct_blocks(args.api_key)
+
+     blocks.queue(args.concurrency_count) \
+         .launch(share=args.share, inbrowser=args.inbrowser,
+                 server_name="0.0.0.0", server_port=args.server_port,
+                 debug=True)
+
+     print("Exiting.")
+
+
+ if __name__ == "__main__":
+     main()
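
For reference, load_roles above expects a plain-text list with one numbered entry per line (it keeps whatever the regex ^\d+\.\s*(.+) captures). A small usage sketch with a hypothetical roles file of that shape:

    # data/ai_society/assistant_roles.txt (format assumed from the regex):
    #   1. Python Programmer
    #   2. Stock Trader
    from apps.agents.agents import load_roles

    roles = load_roles("data/ai_society/assistant_roles.txt")
    print(roles)  # e.g. ['Python Programmer', 'Stock Trader']
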
apps/agents/text_utils.py ADDED
@@ -0,0 +1,31 @@
+ import re
+
+
+ def split_markdown_code(string: str) -> str:
+     """ Split a multiline block of markdown code (triple backticks) into
+         line-sized sub-blocks to make newlines stay where they belong.
+         This transformation is a workaround to a known Gradio bug:
+         https://github.com/gradio-app/gradio/issues/3531
+
+     Args:
+         string (str): Markdown string incompatible with gr.Chatbot.
+
+     Returns:
+         str: Markdown string which is compatible with gr.Chatbot.
+     """
+     substr_list = string.split("```")
+     out = []
+     for i_subs, subs in enumerate(substr_list):
+         if i_subs % 2 == 0:  # outside code, don't change
+             out.append(subs)
+         else:  # inside code
+             br_done = re.sub(r"<br>", "\n", subs)
+
+             def repl(m):
+                 return "```{}```".format(m.group(0))
+
+             new_subs = re.sub(r"\n+", repl, br_done)
+             out.append(new_subs)
+     out_str = "```".join(out)
+     out_str_cleanup = re.sub(r"``````", "", out_str)
+     return out_str_cleanup
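
A short illustration (with a hypothetical input message) of the rewrapping split_markdown_code performs so that gr.Chatbot keeps newlines inside fenced code:

    from apps.agents.text_utils import split_markdown_code

    # Hypothetical chatbot message: a fenced block whose line breaks Gradio
    # has turned into <br> tags.
    msg = "Solution:\n```\nprint('step 1')<br>print('step 2')\n```"
    print(split_markdown_code(msg))
    # Inside the fence, each <br> becomes a real newline and every run of
    # newlines gets wrapped in its own ``` pair, so each code line renders
    # on its own line in the chat window.
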