3 changes: 3 additions & 0 deletions autogen/agentchat/conversable_agent.py
@@ -4287,6 +4287,9 @@ def normilize_message_to_oai(

if oai_message.get("function_call", False) or oai_message.get("tool_calls", False):
oai_message["role"] = "assistant" # only messages with role 'assistant' can have a function call.
# Also set the name for tool_calls messages so we can track the originating agent
if "name" not in oai_message:
oai_message["name"] = name
elif "name" not in oai_message:
# If we don't have a name field, append it
oai_message["name"] = name
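For reference, a self-contained sketch of the normalization rule this hunk implements, using a simplified message dict; the helper name is illustrative and not part of the PR:

from typing import Any

def normalize_role_and_name(oai_message: dict[str, Any], name: str) -> dict[str, Any]:
    """Sketch of the role/name normalization shown in the diff above."""
    if oai_message.get("function_call") or oai_message.get("tool_calls"):
        # Only messages with role 'assistant' may carry a function/tool call.
        oai_message["role"] = "assistant"
        # Also record the originating agent on tool_calls messages.
        if "name" not in oai_message:
            oai_message["name"] = name
    elif "name" not in oai_message:
        oai_message["name"] = name
    return oai_message

Called as normalize_role_and_name({"tool_calls": [{"id": "call_1"}]}, name="planner"), it yields a message with role "assistant" and name "planner", which is what the originating-agent tracking below relies on.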
36 changes: 36 additions & 0 deletions autogen/agentchat/group/group_tool_executor.py
@@ -324,5 +324,41 @@ def _generate_group_tool_reply(
tool_message["tool_responses"] = tool_responses_inner
tool_message["content"] = "\n".join(contents)

# When agent views are isolated, the tool executor's response never reaches
# the originating agent's history via the group manager broadcast. Copy the
# tool response into the originator's thread with the manager so subsequent
# LLM calls have the required tool messages.
if self._group_manager and self._group_manager.groupchat.isolate_agent_views:
originator = self._group_manager.groupchat.agent_by_name(agent_name)
if isinstance(originator, ConversableAgent):
existing_ids: set[str] = set()
history = originator._oai_messages.get(self._group_manager, [])

for hist_msg in history:
# Collect tool_call_ids from prior tool responses
for resp in hist_msg.get("tool_responses", []):
if resp.get("tool_call_id"):
existing_ids.add(resp["tool_call_id"])
# Some tool messages may store tool_call_id directly
if hist_msg.get("tool_call_id"):
existing_ids.add(hist_msg["tool_call_id"])

new_tool_responses = [
resp for resp in tool_message["tool_responses"] if resp.get("tool_call_id") not in existing_ids
]

if new_tool_responses:
deduped_tool_message = deepcopy(tool_message)
deduped_tool_message["tool_responses"] = new_tool_responses
deduped_tool_message["content"] = "\n".join(
resp.get("content", "") for resp in new_tool_responses
)
originator._append_oai_message(
deduped_tool_message,
self._group_manager,
role="user",
name=self._group_manager.name,
)

return True, tool_message
return False, None
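The dedup-and-copy step above can be read in isolation; here is a minimal sketch under the same assumptions about message shape (tool responses carry a tool_call_id, and a tool message may also carry one directly); the function name is illustrative only:

from copy import deepcopy
from typing import Any

def dedupe_tool_message(
    history: list[dict[str, Any]], tool_message: dict[str, Any]
) -> dict[str, Any] | None:
    """Return a copy of tool_message keeping only responses whose
    tool_call_id is not already in the originator's history, or None."""
    existing_ids: set[str] = set()
    for hist_msg in history:
        # Collect tool_call_ids from prior tool responses
        for resp in hist_msg.get("tool_responses", []):
            if resp.get("tool_call_id"):
                existing_ids.add(resp["tool_call_id"])
        # Some tool messages may store tool_call_id directly
        if hist_msg.get("tool_call_id"):
            existing_ids.add(hist_msg["tool_call_id"])

    new_responses = [
        resp for resp in tool_message.get("tool_responses", [])
        if resp.get("tool_call_id") not in existing_ids
    ]
    if not new_responses:
        return None

    deduped = deepcopy(tool_message)
    deduped["tool_responses"] = new_responses
    deduped["content"] = "\n".join(resp.get("content", "") for resp in new_responses)
    return deduped

Keying on tool_call_id rather than message content means a response already copied into the originator's thread is never appended twice, even if the executor re-emits it.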
52 changes: 52 additions & 0 deletions autogen/agentchat/group/multi_agent_chat.py
@@ -36,6 +36,7 @@ def initiate_group_chat(
safeguard_policy: dict[str, Any] | str | None = None,
safeguard_llm_config: LLMConfig | None = None,
mask_llm_config: LLMConfig | None = None,
isolate_agent_views: bool = False,
) -> tuple[ChatResult, ContextVariables, "Agent"]:
"""Initialize and run a group chat using a pattern for configuration.

@@ -46,6 +47,10 @@
safeguard_policy: Optional safeguard policy dict or path to JSON file.
safeguard_llm_config: Optional LLM configuration for safeguard checks.
mask_llm_config: Optional LLM configuration for masking.
isolate_agent_views: If True, agents will only maintain their own message history
and will not receive messages from other agents. When False (default), all agents
receive all messages. When True, messages are still stored in groupchat.messages
for the GroupChatManager's view, but are not broadcast to other agents.
Comment on lines 59 to +64 (Collaborator):
This PR is good; I have a few logical questions from a design point of view.

First, let's look at how a tool is called at a high level:
system prompt + user prompt -> agent -> LLM reviews context -> tool choice -> tool execution -> return to agent / handoff to another agent -> end

So while this PR addresses context rot and context-window limits, I see possibilities of context loss. For example, take a scenario where we have a GroupChat with [Agent A, Agent B, Agent C] and isolated agent views enabled, which isolates each agent's chat history. A tool call from Agent B requires context from Agent A's output (Agent A's tool output, to be specific); Agent B's tool call is recent, but the reference is lost in another agent's chat history.

I guess this is a situation we would likely hit. How should we handle it? I suppose we can link this issue to this PR: #2242


Returns:
ChatResult: The conversation's chat history.
@@ -71,6 +76,7 @@
) = pattern.prepare_group_chat(
max_rounds=max_rounds,
messages=messages,
isolate_agent_views=isolate_agent_views,
)

# Apply safeguards if provided
@@ -115,6 +121,7 @@ async def a_initiate_group_chat(
safeguard_policy: dict[str, Any] | str | None = None,
safeguard_llm_config: LLMConfig | None = None,
mask_llm_config: LLMConfig | None = None,
isolate_agent_views: bool = False,
) -> tuple[ChatResult, ContextVariables, "Agent"]:
"""Initialize and run a group chat using a pattern for configuration, asynchronously.

@@ -125,6 +132,10 @@
safeguard_policy: Optional safeguard policy dict or path to JSON file.
safeguard_llm_config: Optional LLM configuration for safeguard checks.
mask_llm_config: Optional LLM configuration for masking.
isolate_agent_views: If True, agents will only maintain their own message history
and will not receive messages from other agents. When False (default), all agents
receive all messages. When True, messages are still stored in groupchat.messages
for the GroupChatManager's view, but are not broadcast to other agents.

Returns:
ChatResult: The conversation's chat history.
@@ -150,6 +161,7 @@
) = pattern.prepare_group_chat(
max_rounds=max_rounds,
messages=messages,
isolate_agent_views=isolate_agent_views,
)

# Apply safeguards if provided
@@ -194,7 +206,25 @@ def run_group_chat(
safeguard_policy: dict[str, Any] | str | None = None,
safeguard_llm_config: LLMConfig | None = None,
mask_llm_config: LLMConfig | None = None,
isolate_agent_views: bool = False,
) -> RunResponseProtocol:
"""Run a group chat using a pattern for configuration in a separate thread.

Args:
pattern: Pattern object that encapsulates the chat configuration.
messages: Initial message(s).
max_rounds: Maximum number of conversation rounds.
safeguard_policy: Optional safeguard policy dict or path to JSON file.
safeguard_llm_config: Optional LLM configuration for safeguard checks.
mask_llm_config: Optional LLM configuration for masking.
isolate_agent_views: If True, agents will only maintain their own message history
and will not receive messages from other agents. When False (default), all agents
receive all messages. When True, messages are still stored in groupchat.messages
for the GroupChatManager's view, but are not broadcast to other agents.

Returns:
RunResponseProtocol: Response object for tracking the async execution.
"""
iostream = ThreadIOStream()
all_agents = pattern.agents + ([pattern.user_agent] if pattern.user_agent else [])
response = RunResponse(iostream, agents=all_agents)
@@ -206,6 +236,7 @@ def _initiate_group_chat(
safeguard_policy: dict[str, Any] | str | None = safeguard_policy,
safeguard_llm_config: LLMConfig | None = safeguard_llm_config,
mask_llm_config: LLMConfig | None = mask_llm_config,
isolate_agent_views: bool = isolate_agent_views,
iostream: ThreadIOStream = iostream,
response: RunResponse = response,
) -> None:
@@ -218,6 +249,7 @@
safeguard_policy=safeguard_policy,
safeguard_llm_config=safeguard_llm_config,
mask_llm_config=mask_llm_config,
isolate_agent_views=isolate_agent_views,
)

IOStream.get_default().send(
@@ -247,7 +279,25 @@ async def a_run_group_chat(
safeguard_policy: dict[str, Any] | str | None = None,
safeguard_llm_config: LLMConfig | None = None,
mask_llm_config: LLMConfig | None = None,
isolate_agent_views: bool = False,
) -> AsyncRunResponseProtocol:
"""Run a group chat using a pattern for configuration asynchronously.

Args:
pattern: Pattern object that encapsulates the chat configuration.
messages: Initial message(s).
max_rounds: Maximum number of conversation rounds.
safeguard_policy: Optional safeguard policy dict or path to JSON file.
safeguard_llm_config: Optional LLM configuration for safeguard checks.
mask_llm_config: Optional LLM configuration for masking.
isolate_agent_views: If True, agents will only maintain their own message history
and will not receive messages from other agents. When False (default), all agents
receive all messages. When True, messages are still stored in groupchat.messages
for the GroupChatManager's view, but are not broadcast to other agents.

Returns:
AsyncRunResponseProtocol: Response object for tracking the async execution.
"""
iostream = AsyncThreadIOStream()
all_agents = pattern.agents + ([pattern.user_agent] if pattern.user_agent else [])
response = AsyncRunResponse(iostream, agents=all_agents)
@@ -259,6 +309,7 @@ async def _initiate_group_chat(
safeguard_policy: dict[str, Any] | str | None = safeguard_policy,
safeguard_llm_config: LLMConfig | None = safeguard_llm_config,
mask_llm_config: LLMConfig | None = mask_llm_config,
isolate_agent_views: bool = isolate_agent_views,
iostream: AsyncThreadIOStream = iostream,
response: AsyncRunResponse = response,
) -> None:
@@ -271,6 +322,7 @@
safeguard_policy=safeguard_policy,
safeguard_llm_config=safeguard_llm_config,
mask_llm_config=mask_llm_config,
isolate_agent_views=isolate_agent_views,
)

IOStream.get_default().send(
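A hedged end-to-end usage sketch of the new flag; the import paths follow the file layout in this PR, but the LLM config and agent definitions are placeholders:

from autogen import ConversableAgent, LLMConfig
from autogen.agentchat.group.multi_agent_chat import initiate_group_chat
from autogen.agentchat.group.patterns.auto import AutoPattern

llm_config = LLMConfig(api_type="openai", model="gpt-4o")  # placeholder config

with llm_config:
    triage = ConversableAgent(name="triage", system_message="Route the request.")
    solver = ConversableAgent(name="solver", system_message="Solve the request.")

pattern = AutoPattern(
    initial_agent=triage,
    agents=[triage, solver],
    group_manager_args={"llm_config": llm_config},
)

# With isolate_agent_views=True each agent keeps only its own thread with the
# manager; groupchat.messages still holds the full transcript for the manager.
result, context_vars, last_agent = initiate_group_chat(
    pattern=pattern,
    messages="Summarize the quarterly report.",
    max_rounds=10,
    isolate_agent_views=True,
)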
6 changes: 6 additions & 0 deletions autogen/agentchat/group/patterns/auto.py
@@ -72,6 +72,7 @@ def prepare_group_chat(
self,
max_rounds: int,
messages: list[dict[str, Any]] | str,
isolate_agent_views: bool = False,
) -> tuple[
list["ConversableAgent"],
list["ConversableAgent"],
@@ -96,6 +97,10 @@
Args:
max_rounds: Maximum number of conversation rounds.
messages: Initial message(s) to start the conversation.
isolate_agent_views: If True, agents will only maintain their own message history
and will not receive messages from other agents. When False (default), all agents
receive all messages. When True, messages are still stored in groupchat.messages
for the GroupChatManager's view, but are not broadcast to other agents.

Returns:
Tuple containing all necessary components for the group chat.
@@ -120,6 +125,7 @@
components = super().prepare_group_chat(
max_rounds=max_rounds,
messages=messages,
isolate_agent_views=isolate_agent_views,
)

# Extract the group_after_work and the rest of the components
6 changes: 6 additions & 0 deletions autogen/agentchat/group/patterns/manual.py
@@ -59,6 +59,7 @@ def prepare_group_chat(
self,
max_rounds: int,
messages: list[dict[str, Any]] | str,
isolate_agent_views: bool = False,
) -> tuple[
list["ConversableAgent"],
list["ConversableAgent"],
@@ -83,6 +84,10 @@
Args:
max_rounds: Maximum number of conversation rounds.
messages: Initial message(s) to start the conversation.
isolate_agent_views: If True, agents will only maintain their own message history
and will not receive messages from other agents. When False (default), all agents
receive all messages. When True, messages are still stored in groupchat.messages
for the GroupChatManager's view, but are not broadcast to other agents.

Returns:
Tuple containing all necessary components for the group chat.
@@ -91,6 +96,7 @@
components = super().prepare_group_chat(
max_rounds=max_rounds,
messages=messages,
isolate_agent_views=isolate_agent_views,
)

# Extract the group_after_work and the rest of the components
12 changes: 12 additions & 0 deletions autogen/agentchat/group/patterns/pattern.py
@@ -76,6 +76,7 @@ def prepare_group_chat(
self,
max_rounds: int,
messages: list[dict[str, Any]] | str,
isolate_agent_views: bool = False,
) -> tuple[
list["ConversableAgent"],
list["ConversableAgent"],
@@ -99,6 +100,10 @@
Args:
max_rounds: Maximum number of conversation rounds.
messages: Initial message(s) to start the conversation.
isolate_agent_views: If True, agents will only maintain their own message history
and will not receive messages from other agents. When False (default), all agents
receive all messages. When True, messages are still stored in groupchat.messages
for the GroupChatManager's view, but are not broadcast to other agents.

Returns:
Tuple containing:
@@ -147,6 +152,7 @@ def prepare_group_chat(
messages=[],
max_round=max_rounds,
speaker_selection_method=group_transition,
isolate_agent_views=isolate_agent_views,
)

# Create the group manager
@@ -230,6 +236,7 @@ def prepare_group_chat(
self,
max_rounds: int,
messages: list[dict[str, Any]] | str,
isolate_agent_views: bool = False,
) -> tuple[
list["ConversableAgent"],
list["ConversableAgent"],
@@ -253,6 +260,10 @@
Args:
max_rounds: Maximum number of conversation rounds.
messages: Initial message(s) to start the conversation.
isolate_agent_views: If True, agents will only maintain their own message history
and will not receive messages from other agents. When False (default), all agents
receive all messages. When True, messages are still stored in groupchat.messages
for the GroupChatManager's view, but are not broadcast to other agents.

Returns:
Tuple containing all necessary components for the group chat.
@@ -275,6 +286,7 @@
) = super().prepare_group_chat(
max_rounds=max_rounds,
messages=messages,
isolate_agent_views=isolate_agent_views,
)

# Return all components with our group_after_work
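Every concrete pattern below forwards the flag the same way; a minimal sketch of what a custom subclass would do to stay compatible (the class name is hypothetical, and this assumes the base Pattern.prepare_group_chat is concrete, as this diff shows):

from typing import Any

from autogen.agentchat.group.patterns.pattern import Pattern

class PriorityPattern(Pattern):  # hypothetical subclass for illustration
    def prepare_group_chat(
        self,
        max_rounds: int,
        messages: list[dict[str, Any]] | str,
        isolate_agent_views: bool = False,
    ) -> Any:  # the real return type is the components tuple
        # Forward the flag so the base class passes it into GroupChat(...).
        return super().prepare_group_chat(
            max_rounds=max_rounds,
            messages=messages,
            isolate_agent_views=isolate_agent_views,
        )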
6 changes: 6 additions & 0 deletions autogen/agentchat/group/patterns/random.py
@@ -37,6 +37,7 @@ def prepare_group_chat(
self,
max_rounds: int,
messages: list[dict[str, Any]] | str,
isolate_agent_views: bool = False,
) -> tuple[
list["ConversableAgent"],
list["ConversableAgent"],
@@ -61,6 +62,10 @@
Args:
max_rounds: Maximum number of conversation rounds.
messages: Initial message(s) to start the conversation.
isolate_agent_views: If True, agents will only maintain their own message history
and will not receive messages from other agents. When False (default), all agents
receive all messages. When True, messages are still stored in groupchat.messages
for the GroupChatManager's view, but are not broadcast to other agents.

Returns:
Tuple containing all necessary components for the group chat.
@@ -83,6 +88,7 @@
) = super().prepare_group_chat(
max_rounds=max_rounds,
messages=messages,
isolate_agent_views=isolate_agent_views,
)

# Create the random handoffs between agents
6 changes: 6 additions & 0 deletions autogen/agentchat/group/patterns/round_robin.py
@@ -48,6 +48,7 @@ def prepare_group_chat(
self,
max_rounds: int,
messages: list[dict[str, Any]] | str,
isolate_agent_views: bool = False,
) -> tuple[
list["ConversableAgent"],
list["ConversableAgent"],
@@ -72,6 +73,10 @@
Args:
max_rounds: Maximum number of conversation rounds.
messages: Initial message(s) to start the conversation.
isolate_agent_views: If True, agents will only maintain their own message history
and will not receive messages from other agents. When False (default), all agents
receive all messages. When True, messages are still stored in groupchat.messages
for the GroupChatManager's view, but are not broadcast to other agents.

Returns:
Tuple containing all necessary components for the group chat.
@@ -94,6 +99,7 @@
) = super().prepare_group_chat(
max_rounds=max_rounds,
messages=messages,
isolate_agent_views=isolate_agent_views,
)

# Create the handoffs between agents