Skip to content

Commit 9a8d84c

Browse files
authored
feat: RolePlaying support async run (#1787)
1 parent 6842d1f commit 9a8d84c

File tree

8 files changed

+219
-17
lines changed

8 files changed

+219
-17
lines changed

.github/ISSUE_TEMPLATE/bug_report.yml

+1-1
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ body:
2626
attributes:
2727
label: What version of camel are you using?
2828
description: Run command `python3 -c 'print(__import__("camel").__version__)'` in your shell and paste the output here.
29-
placeholder: E.g., 0.2.23
29+
placeholder: E.g., 0.2.24
3030
validations:
3131
required: true
3232

camel/__init__.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@
1414

1515
from camel.logger import disable_logging, enable_logging, set_log_level
1616

17-
__version__ = '0.2.23'
17+
__version__ = '0.2.24'
1818

1919
__all__ = [
2020
'__version__',

camel/societies/role_playing.py

+119
Original file line numberDiff line numberDiff line change
@@ -468,6 +468,42 @@ def init_chat(self, init_msg_content: Optional[str] = None) -> BaseMessage:
468468

469469
return init_msg
470470

471+
async def ainit_chat(
    self, init_msg_content: Optional[str] = None
) -> BaseMessage:
    r"""Asynchronously resets both the assistant and the user agents and
    builds the opening message for a role-playing session.

    Args:
        init_msg_content (str, optional): A user-specified initial message.
            Will be sent to the role-playing session as the initial
            message. (default: :obj:`None`)

    Returns:
        BaseMessage: A single `BaseMessage` representing the initial
            message.
    """
    # reset() is synchronous today; switch to `await` here if it ever
    # becomes a coroutine.
    for agent in (self.assistant_agent, self.user_agent):
        agent.reset()

    content = init_msg_content
    if content is None:
        content = (
            "Now start to give me instructions one by one. "
            "Only reply with Instruction and Input."
        )

    # The opening message is attributed to the assistant; fall back to a
    # generic role name when no system message is available.
    sender = getattr(self.assistant_sys_msg, 'role_name', None) or "assistant"
    return BaseMessage.make_assistant_message(
        role_name=sender,
        content=content,
    )
506+
471507
def step(
472508
self,
473509
assistant_msg: BaseMessage,
@@ -549,3 +585,86 @@ def step(
549585
info=user_response.info,
550586
),
551587
)
588+
589+
async def astep(
    self,
    assistant_msg: BaseMessage,
) -> Tuple[ChatAgentResponse, ChatAgentResponse]:
    r"""Asynchronously advances the conversation by one round: the
    assistant's message is processed by the user agent, and the user's
    reply is then processed by the assistant agent.

    Args:
        assistant_msg: A `BaseMessage` representing the message from the
            assistant.

    Returns:
        Tuple[ChatAgentResponse, ChatAgentResponse]: A tuple containing two
            ChatAgentResponse: the first struct contains the resulting
            assistant message, whether the assistant agent terminated the
            conversation, and any additional assistant information; the
            second struct contains the resulting user message, whether the
            user agent terminated the conversation, and any additional user
            information.
    """

    def _multi_response_enabled(agent) -> bool:
        # With `n > 1` the chat step does not record the selected message
        # itself, so role-play records it here; with `n == 1` recording
        # here would duplicate the memory entry.
        config = agent.model_backend.model_config_dict
        return 'n' in config.keys() and config['n'] > 1

    user_response = await self.user_agent.astep(assistant_msg)
    if user_response.terminated or user_response.msgs is None:
        # User side halted: return an empty assistant response plus the
        # user's termination state.
        empty_assistant = ChatAgentResponse(
            msgs=[], terminated=False, info={}
        )
        halted_user = ChatAgentResponse(
            msgs=[],
            terminated=user_response.terminated,
            info=user_response.info,
        )
        return empty_assistant, halted_user

    user_msg = self._reduce_message_options(user_response.msgs)
    if _multi_response_enabled(self.user_agent):
        self.user_agent.record_message(user_msg)

    assistant_response = await self.assistant_agent.astep(user_msg)
    if assistant_response.terminated or assistant_response.msgs is None:
        # Assistant side halted: surface its termination state alongside
        # the already-obtained user message.
        halted_assistant = ChatAgentResponse(
            msgs=[],
            terminated=assistant_response.terminated,
            info=assistant_response.info,
        )
        ok_user = ChatAgentResponse(
            msgs=[user_msg], terminated=False, info=user_response.info
        )
        return halted_assistant, ok_user

    assistant_msg = self._reduce_message_options(assistant_response.msgs)
    if _multi_response_enabled(self.assistant_agent):
        self.assistant_agent.record_message(assistant_msg)

    return (
        ChatAgentResponse(
            msgs=[assistant_msg],
            terminated=assistant_response.terminated,
            info=assistant_response.info,
        ),
        ChatAgentResponse(
            msgs=[user_msg],
            terminated=user_response.terminated,
            info=user_response.info,
        ),
    )

docs/conf.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@
2727
project = 'CAMEL'
2828
copyright = '2024, CAMEL-AI.org'
2929
author = 'CAMEL-AI.org'
30-
release = '0.2.23'
30+
release = '0.2.24'
3131

3232
html_favicon = (
3333
'https://raw.githubusercontent.com/camel-ai/camel/master/misc/favicon.png'

docs/key_modules/loaders.md

+2-2
Original file line numberDiff line numberDiff line change
@@ -340,14 +340,14 @@ response = jina_reader.read_content("https://docs.camel-ai.org/")
340340
print(response)
341341
```
342342
```markdown
343-
>>>Welcome to CAMEL’s documentation! — CAMEL 0.2.23 documentation
343+
>>>Welcome to CAMEL’s documentation! — CAMEL 0.2.24 documentation
344344
===============
345345

346346
[Skip to main content](https://docs.camel-ai.org/#main-content)
347347

348348
Back to top Ctrl+K
349349

350-
[![Image 1](https://raw.githubusercontent.com/camel-ai/camel/master/misc/logo_light.png) ![Image 2](https://raw.githubusercontent.com/camel-ai/camel/master/misc/logo_light.png)CAMEL 0.2.23](https://docs.camel-ai.org/#)
350+
[![Image 1](https://raw.githubusercontent.com/camel-ai/camel/master/misc/logo_light.png) ![Image 2](https://raw.githubusercontent.com/camel-ai/camel/master/misc/logo_light.png)CAMEL 0.2.24](https://docs.camel-ai.org/#)
351351

352352
Search Ctrl+K
353353

pyproject.toml

+1-1
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
44

55
[project]
66
name = "camel-ai"
7-
version = "0.2.23"
7+
version = "0.2.24"
88
description = "Communicative Agents for AI Society Study"
99
authors = [{ name = "CAMEL-AI.org" }]
1010
requires-python = ">=3.10,<3.13"

test/agents/test_role_playing.py

+93-10
Original file line numberDiff line numberDiff line change
@@ -194,28 +194,50 @@ def test_role_playing_step(
194194

195195

196196
@pytest.mark.model_backend
197-
def test_role_playing_with_function(step_call_count=3):
197+
@pytest.mark.asyncio
198+
@pytest.mark.parametrize(
199+
"task_type, extend_sys_msg_meta_dicts, extend_task_specify_meta_dict",
200+
[
201+
(TaskType.AI_SOCIETY, None, None),
202+
(
203+
TaskType.CODE,
204+
[dict(domain="science", language="python")] * 2,
205+
dict(domain="science", language="python"),
206+
),
207+
(TaskType.MISALIGNMENT, None, None),
208+
],
209+
)
210+
async def test_role_playing_astep(
211+
task_type,
212+
extend_sys_msg_meta_dicts,
213+
extend_task_specify_meta_dict,
214+
step_call_count=3,
215+
):
198216
if model is not None:
199217
model.run = MagicMock(return_value=model_backend_rsp)
200218

201-
tools = MathToolkit().get_tools()
202-
203219
role_playing = RolePlaying(
204220
assistant_role_name="AI Assistant",
205-
assistant_agent_kwargs=dict(
206-
model=model,
207-
tools=tools,
208-
),
221+
assistant_agent_kwargs=dict(model=model),
209222
user_role_name="AI User",
210223
user_agent_kwargs=dict(model=model),
211224
task_prompt="Perform the task",
212225
task_specify_agent_kwargs=dict(model=model),
213-
task_type=TaskType.AI_SOCIETY,
226+
task_type=task_type,
227+
extend_sys_msg_meta_dicts=extend_sys_msg_meta_dicts,
228+
extend_task_specify_meta_dict=extend_task_specify_meta_dict,
229+
)
230+
init_assistant_msg = BaseMessage.make_assistant_message(
231+
role_name="AI Assistant", content="Hello"
214232
)
233+
print(role_playing.assistant_agent.system_message)
234+
print(role_playing.user_agent.system_message)
215235

216-
input_msg = role_playing.init_chat()
217236
for i in range(step_call_count):
218-
assistant_response, user_response = role_playing.step(input_msg)
237+
assistant_response, user_response = await role_playing.astep(
238+
init_assistant_msg
239+
)
240+
219241
for response in (assistant_response, user_response):
220242
assert isinstance(
221243
response.msgs, list
@@ -237,6 +259,67 @@ def test_role_playing_with_function(step_call_count=3):
237259
), f"Error in round {i+1}: response.info is not a dict"
238260

239261

262+
@pytest.mark.model_backend
def test_role_playing_with_function(step_call_count=3):
    # Stub the backend when a model is configured so no network call is made.
    if model is not None:
        model.run = MagicMock(return_value=model_backend_rsp)

    session = RolePlaying(
        assistant_role_name="AI Assistant",
        assistant_agent_kwargs=dict(
            model=model,
            tools=MathToolkit().get_tools(),
        ),
        user_role_name="AI User",
        user_agent_kwargs=dict(model=model),
        task_prompt="Perform the task",
        task_specify_agent_kwargs=dict(model=model),
        task_type=TaskType.AI_SOCIETY,
    )

    msg = session.init_chat()
    for _ in range(step_call_count):
        # step() yields (assistant_response, user_response); check both.
        for resp in session.step(msg):
            assert isinstance(resp.msgs, list)
            assert len(resp.msgs) == 1
            assert isinstance(resp.msgs[0], BaseMessage)
            assert isinstance(resp.terminated, bool)
            assert resp.terminated is False
            assert isinstance(resp.info, dict)
291+
292+
293+
@pytest.mark.model_backend
@pytest.mark.asyncio
@pytest.mark.parametrize("init_msg_content", [None, "Custom init message"])
async def test_role_playing_ainit_chat(init_msg_content):
    # Stub the backend when a model is configured so no network call is made.
    if model is not None:
        model.run = MagicMock(return_value=model_backend_rsp)

    session = RolePlaying(
        assistant_role_name="AI Assistant",
        assistant_agent_kwargs=dict(model=model),
        user_role_name="AI User",
        user_agent_kwargs=dict(model=model),
        task_prompt="Perform the task",
        task_specify_agent_kwargs=dict(model=model),
        task_type=TaskType.AI_SOCIETY,
    )

    msg = await session.ainit_chat(init_msg_content)

    assert isinstance(msg, BaseMessage)
    assert msg.role_type == RoleType.ASSISTANT
    assert msg.role_name == "AI Assistant"
    # A custom message is echoed verbatim; otherwise the default prompt
    # is used.
    expected = init_msg_content or (
        "Now start to give me instructions one by one. "
        "Only reply with Instruction and Input."
    )
    assert msg.content == expected
321+
322+
240323
def test_role_playing_role_sequence(
241324
model=None,
242325
):

uv.lock

+1-1
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

0 commit comments

Comments
 (0)