diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py
index 5ed5e6bc2a1..9c91396f646 100644
--- a/autogen/agentchat/conversable_agent.py
+++ b/autogen/agentchat/conversable_agent.py
@@ -4610,6 +4610,9 @@ def normilize_message_to_oai(
         if oai_message.get("function_call", False) or oai_message.get("tool_calls", False):
             oai_message["role"] = "assistant"  # only messages with role 'assistant' can have a function call.
+            # Also set the name for tool_calls messages so we can track the originating agent
+            if "name" not in oai_message:
+                oai_message["name"] = name
         elif "name" not in oai_message:
             # If we don't have a name field, append it
             oai_message["name"] = name
diff --git a/autogen/agentchat/group/group_tool_executor.py b/autogen/agentchat/group/group_tool_executor.py
index 6795d3969be..1ff43dc7534 100644
--- a/autogen/agentchat/group/group_tool_executor.py
+++ b/autogen/agentchat/group/group_tool_executor.py
@@ -324,5 +324,41 @@ def _generate_group_tool_reply(
             tool_message["tool_responses"] = tool_responses_inner
             tool_message["content"] = "\n".join(contents)

+            # When agent views are isolated, the tool executor's response never reaches
+            # the originating agent's history via the group manager broadcast. Copy the
+            # tool response into the originator's thread with the manager so subsequent
+            # LLM calls have the required tool messages.
+            if self._group_manager and self._group_manager.groupchat.isolate_agent_views:
+                originator = self._group_manager.groupchat.agent_by_name(agent_name)
+                if isinstance(originator, ConversableAgent):
+                    existing_ids: set[str] = set()
+                    history = originator._oai_messages.get(self._group_manager, [])
+
+                    for hist_msg in history:
+                        # Collect tool_call_ids from prior tool responses
+                        for resp in hist_msg.get("tool_responses", []):
+                            if resp.get("tool_call_id"):
+                                existing_ids.add(resp["tool_call_id"])
+                        # Some tool messages may store tool_call_id directly
+                        if hist_msg.get("tool_call_id"):
+                            existing_ids.add(hist_msg["tool_call_id"])
+
+                    new_tool_responses = [
+                        resp for resp in tool_message["tool_responses"] if resp.get("tool_call_id") not in existing_ids
+                    ]
+
+                    if new_tool_responses:
+                        deduped_tool_message = deepcopy(tool_message)
+                        deduped_tool_message["tool_responses"] = new_tool_responses
+                        deduped_tool_message["content"] = "\n".join(
+                            resp.get("content", "") for resp in new_tool_responses
+                        )
+                        originator._append_oai_message(
+                            deduped_tool_message,
+                            self._group_manager,
+                            role="user",
+                            name=self._group_manager.name,
+                        )
+
             return True, tool_message
         return False, None
diff --git a/autogen/agentchat/group/multi_agent_chat.py b/autogen/agentchat/group/multi_agent_chat.py
index 21bf0e86a85..4e949f2dc0e 100644
--- a/autogen/agentchat/group/multi_agent_chat.py
+++ b/autogen/agentchat/group/multi_agent_chat.py
@@ -47,6 +47,7 @@ def initiate_group_chat(
     safeguard_policy: dict[str, Any] | str | None = None,
     safeguard_llm_config: LLMConfig | None = None,
     mask_llm_config: LLMConfig | None = None,
+    isolate_agent_views: bool = False,
 ) -> tuple[ChatResult, ContextVariables, "Agent"]:
     """Initialize and run a group chat using a pattern for configuration.

@@ -57,6 +58,10 @@ def initiate_group_chat(
         safeguard_policy: Optional safeguard policy dict or path to JSON file.
         safeguard_llm_config: Optional LLM configuration for safeguard checks.
         mask_llm_config: Optional LLM configuration for masking.
+        isolate_agent_views: If True, agents will only maintain their own message history
+            and will not receive messages from other agents. When False (default), all agents
+            receive all messages. When True, messages are still stored in groupchat.messages
+            for the GroupChatManager's view, but are not broadcast to other agents.

     Returns:
         ChatResult: Conversations chat history.
@@ -82,6 +87,7 @@ def initiate_group_chat(
     ) = pattern.prepare_group_chat(
         max_rounds=max_rounds,
         messages=messages,
+        isolate_agent_views=isolate_agent_views,
     )

     # Apply safeguards if provided
@@ -126,6 +132,7 @@ async def a_initiate_group_chat(
     safeguard_policy: dict[str, Any] | str | None = None,
     safeguard_llm_config: LLMConfig | None = None,
     mask_llm_config: LLMConfig | None = None,
+    isolate_agent_views: bool = False,
 ) -> tuple[ChatResult, ContextVariables, "Agent"]:
     """Initialize and run a group chat using a pattern for configuration, asynchronously.

@@ -136,6 +143,10 @@ async def a_initiate_group_chat(
         safeguard_policy: Optional safeguard policy dict or path to JSON file.
         safeguard_llm_config: Optional LLM configuration for safeguard checks.
         mask_llm_config: Optional LLM configuration for masking.
+        isolate_agent_views: If True, agents will only maintain their own message history
+            and will not receive messages from other agents. When False (default), all agents
+            receive all messages. When True, messages are still stored in groupchat.messages
+            for the GroupChatManager's view, but are not broadcast to other agents.

     Returns:
         ChatResult: Conversations chat history.
@@ -161,6 +172,7 @@ async def a_initiate_group_chat(
     ) = pattern.prepare_group_chat(
         max_rounds=max_rounds,
         messages=messages,
+        isolate_agent_views=isolate_agent_views,
     )

     # Apply safeguards if provided
@@ -205,26 +217,24 @@ def run_group_chat(
     safeguard_policy: dict[str, Any] | str | None = None,
     safeguard_llm_config: LLMConfig | None = None,
     mask_llm_config: LLMConfig | None = None,
+    isolate_agent_views: bool = False,
 ) -> RunResponseProtocol:
-    """Run a group chat with multiple agents using the specified pattern.
-
-    This method executes a multi-agent conversation in a background thread and returns
-    immediately with a RunResponse object that can be used to iterate over events.
-
-    For step-by-step execution with control over each event, use run_group_chat_iter() instead.
+    """Run a group chat using a pattern for configuration in a separate thread.

     Args:
-        pattern: The pattern that defines how agents interact (e.g., AutoPattern,
-            RoundRobinPattern, RandomPattern).
-        messages: The initial message(s) to start the conversation. Can be a string
-            or a list of message dictionaries.
-        max_rounds: Maximum number of conversation rounds. Defaults to 20.
-        safeguard_policy: Optional safeguard policy for content filtering.
-        safeguard_llm_config: Optional LLM config for safeguard evaluation.
-        mask_llm_config: Optional LLM config for content masking.
+        pattern: Pattern object that encapsulates the chat configuration.
+        messages: Initial message(s).
+        max_rounds: Maximum number of conversation rounds.
+        safeguard_policy: Optional safeguard policy dict or path to JSON file.
+        safeguard_llm_config: Optional LLM configuration for safeguard checks.
+        mask_llm_config: Optional LLM configuration for masking.
+        isolate_agent_views: If True, agents will only maintain their own message history
+            and will not receive messages from other agents. When False (default), all agents
+            receive all messages. When True, messages are still stored in groupchat.messages
+            for the GroupChatManager's view, but are not broadcast to other agents.

     Returns:
-        RunResponseProtocol
+        RunResponseProtocol: Response object for tracking the background execution.
     """
     iostream = ThreadIOStream()
     all_agents = pattern.agents + ([pattern.user_agent] if pattern.user_agent else [])
@@ -237,6 +247,7 @@ def _initiate_group_chat(
         safeguard_policy: dict[str, Any] | str | None = safeguard_policy,
         safeguard_llm_config: LLMConfig | None = safeguard_llm_config,
         mask_llm_config: LLMConfig | None = mask_llm_config,
+        isolate_agent_views: bool = isolate_agent_views,
         iostream: ThreadIOStream = iostream,
         response: RunResponse = response,
     ) -> None:
@@ -249,6 +260,7 @@ def _initiate_group_chat(
             safeguard_policy=safeguard_policy,
             safeguard_llm_config=safeguard_llm_config,
             mask_llm_config=mask_llm_config,
+            isolate_agent_views=isolate_agent_views,
         )

         IOStream.get_default().send(
@@ -278,26 +290,24 @@ async def a_run_group_chat(
     safeguard_policy: dict[str, Any] | str | None = None,
     safeguard_llm_config: LLMConfig | None = None,
     mask_llm_config: LLMConfig | None = None,
+    isolate_agent_views: bool = False,
 ) -> AsyncRunResponseProtocol:
-    """Async version of run_group_chat for running group chats in async contexts.
-
-    This method executes a multi-agent conversation as an async task and returns
-    immediately with an AsyncRunResponse object that can be used to iterate over events.
-
-    For step-by-step execution with control over each event, use a_run_group_chat_iter() instead.
+    """Run a group chat using a pattern for configuration asynchronously.

     Args:
-        pattern: The pattern that defines how agents interact (e.g., AutoPattern,
-            RoundRobinPattern, RandomPattern).
-        messages: The initial message(s) to start the conversation. Can be a string
-            or a list of message dictionaries.
-        max_rounds: Maximum number of conversation rounds. Defaults to 20.
-        safeguard_policy: Optional safeguard policy for content filtering.
-        safeguard_llm_config: Optional LLM config for safeguard evaluation.
-        mask_llm_config: Optional LLM config for content masking.
+        pattern: Pattern object that encapsulates the chat configuration.
+        messages: Initial message(s).
+        max_rounds: Maximum number of conversation rounds.
+        safeguard_policy: Optional safeguard policy dict or path to JSON file.
+        safeguard_llm_config: Optional LLM configuration for safeguard checks.
+        mask_llm_config: Optional LLM configuration for masking.
+        isolate_agent_views: If True, agents will only maintain their own message history
+            and will not receive messages from other agents. When False (default), all agents
+            receive all messages. When True, messages are still stored in groupchat.messages
+            for the GroupChatManager's view, but are not broadcast to other agents.

     Returns:
-        AsyncRunResponseProtocol
+        AsyncRunResponseProtocol: Response object for tracking the async execution.
""" iostream = AsyncThreadIOStream() all_agents = pattern.agents + ([pattern.user_agent] if pattern.user_agent else []) @@ -310,6 +320,7 @@ async def _initiate_group_chat( safeguard_policy: dict[str, Any] | str | None = safeguard_policy, safeguard_llm_config: LLMConfig | None = safeguard_llm_config, mask_llm_config: LLMConfig | None = mask_llm_config, + isolate_agent_views: bool = isolate_agent_views, iostream: AsyncThreadIOStream = iostream, response: AsyncRunResponse = response, ) -> None: @@ -322,6 +333,7 @@ async def _initiate_group_chat( safeguard_policy=safeguard_policy, safeguard_llm_config=safeguard_llm_config, mask_llm_config=mask_llm_config, + isolate_agent_views=isolate_agent_views, ) iostream.send( diff --git a/autogen/agentchat/group/patterns/auto.py b/autogen/agentchat/group/patterns/auto.py index 2b8820b1b5d..0b47a2a7672 100644 --- a/autogen/agentchat/group/patterns/auto.py +++ b/autogen/agentchat/group/patterns/auto.py @@ -72,6 +72,7 @@ def prepare_group_chat( self, max_rounds: int, messages: list[dict[str, Any]] | str, + isolate_agent_views: bool = False, ) -> tuple[ list["ConversableAgent"], list["ConversableAgent"], @@ -96,6 +97,10 @@ def prepare_group_chat( Args: max_rounds: Maximum number of conversation rounds. messages: Initial message(s) to start the conversation. + isolate_agent_views: If True, agents will only maintain their own message history + and will not receive messages from other agents. When False (default), all agents + receive all messages. When True, messages are still stored in groupchat.messages + for the GroupChatManager's view, but are not broadcast to other agents. Returns: Tuple containing all necessary components for the group chat. @@ -120,6 +125,7 @@ def prepare_group_chat( components = super().prepare_group_chat( max_rounds=max_rounds, messages=messages, + isolate_agent_views=isolate_agent_views, ) # Extract the group_after_work and the rest of the components diff --git a/autogen/agentchat/group/patterns/manual.py b/autogen/agentchat/group/patterns/manual.py index ad85999d4b9..5053df12ec8 100644 --- a/autogen/agentchat/group/patterns/manual.py +++ b/autogen/agentchat/group/patterns/manual.py @@ -59,6 +59,7 @@ def prepare_group_chat( self, max_rounds: int, messages: list[dict[str, Any]] | str, + isolate_agent_views: bool = False, ) -> tuple[ list["ConversableAgent"], list["ConversableAgent"], @@ -83,6 +84,10 @@ def prepare_group_chat( Args: max_rounds: Maximum number of conversation rounds. messages: Initial message(s) to start the conversation. + isolate_agent_views: If True, agents will only maintain their own message history + and will not receive messages from other agents. When False (default), all agents + receive all messages. When True, messages are still stored in groupchat.messages + for the GroupChatManager's view, but are not broadcast to other agents. Returns: Tuple containing all necessary components for the group chat. 
@@ -91,6 +96,7 @@ def prepare_group_chat(
         components = super().prepare_group_chat(
             max_rounds=max_rounds,
             messages=messages,
+            isolate_agent_views=isolate_agent_views,
         )

         # Extract the group_after_work and the rest of the components
diff --git a/autogen/agentchat/group/patterns/pattern.py b/autogen/agentchat/group/patterns/pattern.py
index 6d4c1cbd2eb..92342a3ae32 100644
--- a/autogen/agentchat/group/patterns/pattern.py
+++ b/autogen/agentchat/group/patterns/pattern.py
@@ -76,6 +76,7 @@ def prepare_group_chat(
         self,
         max_rounds: int,
         messages: list[dict[str, Any]] | str,
+        isolate_agent_views: bool = False,
     ) -> tuple[
         list["ConversableAgent"],
         list["ConversableAgent"],
@@ -99,6 +100,10 @@ def prepare_group_chat(
         Args:
             max_rounds: Maximum number of conversation rounds.
             messages: Initial message(s) to start the conversation.
+            isolate_agent_views: If True, agents will only maintain their own message history
+                and will not receive messages from other agents. When False (default), all agents
+                receive all messages. When True, messages are still stored in groupchat.messages
+                for the GroupChatManager's view, but are not broadcast to other agents.

         Returns:
             Tuple containing:
@@ -147,6 +152,7 @@ def prepare_group_chat(
             messages=[],
             max_round=max_rounds,
             speaker_selection_method=group_transition,
+            isolate_agent_views=isolate_agent_views,
         )

         # Create the group manager
@@ -230,6 +236,7 @@ def prepare_group_chat(
         self,
         max_rounds: int,
         messages: list[dict[str, Any]] | str,
+        isolate_agent_views: bool = False,
     ) -> tuple[
         list["ConversableAgent"],
         list["ConversableAgent"],
@@ -253,6 +260,10 @@ def prepare_group_chat(
         Args:
             max_rounds: Maximum number of conversation rounds.
             messages: Initial message(s) to start the conversation.
+            isolate_agent_views: If True, agents will only maintain their own message history
+                and will not receive messages from other agents. When False (default), all agents
+                receive all messages. When True, messages are still stored in groupchat.messages
+                for the GroupChatManager's view, but are not broadcast to other agents.

         Returns:
             Tuple containing all necessary components for the group chat.
@@ -275,6 +286,7 @@ def prepare_group_chat(
         ) = super().prepare_group_chat(
             max_rounds=max_rounds,
             messages=messages,
+            isolate_agent_views=isolate_agent_views,
         )

         # Return all components with our group_after_work
diff --git a/autogen/agentchat/group/patterns/random.py b/autogen/agentchat/group/patterns/random.py
index 2e87d6a87e2..f5de7395e2c 100644
--- a/autogen/agentchat/group/patterns/random.py
+++ b/autogen/agentchat/group/patterns/random.py
@@ -37,6 +37,7 @@ def prepare_group_chat(
         self,
         max_rounds: int,
         messages: list[dict[str, Any]] | str,
+        isolate_agent_views: bool = False,
     ) -> tuple[
         list["ConversableAgent"],
         list["ConversableAgent"],
@@ -61,6 +62,10 @@ def prepare_group_chat(
         Args:
             max_rounds: Maximum number of conversation rounds.
             messages: Initial message(s) to start the conversation.
+            isolate_agent_views: If True, agents will only maintain their own message history
+                and will not receive messages from other agents. When False (default), all agents
+                receive all messages. When True, messages are still stored in groupchat.messages
+                for the GroupChatManager's view, but are not broadcast to other agents.

         Returns:
             Tuple containing all necessary components for the group chat.
@@ -83,6 +88,7 @@ def prepare_group_chat(
         ) = super().prepare_group_chat(
             max_rounds=max_rounds,
             messages=messages,
+            isolate_agent_views=isolate_agent_views,
         )

         # Create the random handoffs between agents
diff --git a/autogen/agentchat/group/patterns/round_robin.py b/autogen/agentchat/group/patterns/round_robin.py
index 49ab314d10c..98bfde44285 100644
--- a/autogen/agentchat/group/patterns/round_robin.py
+++ b/autogen/agentchat/group/patterns/round_robin.py
@@ -48,6 +48,7 @@ def prepare_group_chat(
         self,
         max_rounds: int,
         messages: list[dict[str, Any]] | str,
+        isolate_agent_views: bool = False,
     ) -> tuple[
         list["ConversableAgent"],
         list["ConversableAgent"],
@@ -72,6 +73,10 @@ def prepare_group_chat(
         Args:
             max_rounds: Maximum number of conversation rounds.
             messages: Initial message(s) to start the conversation.
+            isolate_agent_views: If True, agents will only maintain their own message history
+                and will not receive messages from other agents. When False (default), all agents
+                receive all messages. When True, messages are still stored in groupchat.messages
+                for the GroupChatManager's view, but are not broadcast to other agents.

         Returns:
             Tuple containing all necessary components for the group chat.
@@ -94,6 +99,7 @@ def prepare_group_chat(
         ) = super().prepare_group_chat(
             max_rounds=max_rounds,
             messages=messages,
+            isolate_agent_views=isolate_agent_views,
         )

         # Create the handoffs between agents
diff --git a/autogen/agentchat/groupchat.py b/autogen/agentchat/groupchat.py
index d8520fbce0b..77b9932cd29 100644
--- a/autogen/agentchat/groupchat.py
+++ b/autogen/agentchat/groupchat.py
@@ -128,6 +128,10 @@ def custom_speaker_selection_func(
     - select_speaker_auto_model_client_cls: Custom model client class for the internal speaker select agent used during 'auto' speaker selection (optional)
     - select_speaker_auto_llm_config: LLM config for the internal speaker select agent used during 'auto' speaker selection (optional)
     - role_for_select_speaker_messages: sets the role name for speaker selection when in 'auto' mode, typically 'user' or 'system'. (default: 'system')
+    - isolate_agent_views: if True, agents will only maintain their own message history and will not receive messages from other agents.
+        When False (default), all agents receive all messages (current behavior). When True, messages are still stored in
+        groupchat.messages for the GroupChatManager's view, but are not broadcast to other agents. Each agent will only
+        see their own messages when generating replies. (default: False)
     """

     agents: list[Agent]
@@ -165,6 +169,7 @@ def custom_speaker_selection_func(
     select_speaker_auto_model_client_cls: ModelClient | list[ModelClient] | None = None
     select_speaker_auto_llm_config: LLMConfig | dict[str, Any] | Literal[False] | None = None
     role_for_select_speaker_messages: str | None = "system"
+    isolate_agent_views: bool = False

     _VALID_SPEAKER_SELECTION_METHODS = ["auto", "manual", "random", "round_robin"]
     _VALID_SPEAKER_TRANSITIONS_TYPE = ["allowed", "disallowed", None]
@@ -1212,8 +1217,9 @@ def run_chat(
         if send_introductions:
             # Broadcast the intro
             intro = groupchat.introductions_msg()
-            for agent in groupchat.agents:
-                self.send(intro, agent, request_reply=False, silent=True)
+            if not groupchat.isolate_agent_views:
+                for agent in groupchat.agents:
+                    self.send(intro, agent, request_reply=False, silent=True)
             # NOTE: We do not also append to groupchat.messages,
             # since groupchat handles its own introductions

@@ -1224,23 +1230,24 @@ def run_chat(
         for i in range(groupchat.max_round):
             self._last_speaker = speaker
             groupchat.append(message, speaker)
-            # broadcast the message to all agents except the speaker
-            for agent in groupchat.agents:
-                if agent != speaker:
-                    inter_reply = groupchat._run_inter_agent_guardrails(
-                        src_agent_name=speaker.name,
-                        dst_agent_name=agent.name,
-                        message_content=message,
-                    )
-                    if inter_reply is not None:
-                        replacement = (
-                            {"content": inter_reply, "name": speaker.name}
-                            if not isinstance(inter_reply, dict)
-                            else inter_reply
-                        )
-                        self.send(replacement, agent, request_reply=False, silent=True)
-                    else:
-                        self.send(message, agent, request_reply=False, silent=True)
+            # broadcast the message to all agents except the speaker (unless isolate_agent_views is True)
+            if not groupchat.isolate_agent_views:
+                for agent in groupchat.agents:
+                    if agent != speaker:
+                        inter_reply = groupchat._run_inter_agent_guardrails(
+                            src_agent_name=speaker.name,
+                            dst_agent_name=agent.name,
+                            message_content=message,
+                        )
+                        if inter_reply is not None:
+                            replacement = (
+                                {"content": inter_reply, "name": speaker.name}
+                                if not isinstance(inter_reply, dict)
+                                else inter_reply
+                            )
+                            self.send(replacement, agent, request_reply=False, silent=True)
+                        else:
+                            self.send(message, agent, request_reply=False, silent=True)

             if self._is_termination_msg(message):
                 # The conversation is over
@@ -1257,8 +1264,60 @@ def run_chat(
                     iostream = IOStream.get_default()
                     iostream.send(GroupChatRunChatEvent(speaker=speaker, silent=silent))

+                # If isolate_agent_views is True, build a temporary message list for the speaker
+                # that includes their own messages plus the last message, without persisting it.
+                # This allows the speaker to generate a reply while maintaining isolation.
+                if groupchat.isolate_agent_views:
+                    # Get the speaker's own messages (their history with the manager)
+                    speaker_own_messages = speaker._oai_messages.get(self, [])
+                    # Build temporary message list: speaker's own messages + last message
+                    # This gives context without adding the last message to persistent history
+                    should_add_message = False
+                    if message:
+                        # Check if the message is a tool response
+                        is_tool_response = message.get("role") == "tool" or message.get("tool_responses")
+                        if is_tool_response:
+                            # Only add tool responses if the speaker has the corresponding tool_call
+                            # in their history (i.e., this tool response belongs to them)
+                            msg_tool_call_ids = set()
+                            for resp in message.get("tool_responses", []):
+                                if resp.get("tool_call_id"):
+                                    msg_tool_call_ids.add(resp["tool_call_id"])
+                            if message.get("tool_call_id"):
+                                msg_tool_call_ids.add(message["tool_call_id"])
+
+                            # Check if speaker has tool_calls with matching IDs
+                            speaker_tool_call_ids = set()
+                            for hist_msg in speaker_own_messages:
+                                for tc in hist_msg.get("tool_calls", []):
+                                    if tc.get("id"):
+                                        speaker_tool_call_ids.add(tc["id"])
+
+                            # Only include if any of the tool response IDs match speaker's tool calls
+                            # and it's not already in their history
+                            if msg_tool_call_ids & speaker_tool_call_ids:
+                                # Check if not already in history
+                                already_in_history = False
+                                if speaker_own_messages:
+                                    last_in_history = speaker_own_messages[-1]
+                                    if last_in_history.get("tool_responses"):
+                                        hist_ids = {
+                                            r.get("tool_call_id") for r in last_in_history.get("tool_responses", [])
+                                        }
+                                        already_in_history = msg_tool_call_ids and msg_tool_call_ids == hist_ids
+                                should_add_message = not already_in_history
+                        else:
+                            # Not a tool response - always add it
+                            should_add_message = True
+
+                    temp_messages = speaker_own_messages + [message] if should_add_message else speaker_own_messages
+                    messages_for_reply = temp_messages
+                else:
+                    # Normal flow: use speaker's persistent message history
+                    messages_for_reply = speaker._oai_messages[self]
+
                 guardrails_activated = False
-                guardrails_reply = groupchat._run_input_guardrails(speaker, speaker._oai_messages[self])
+                guardrails_reply = groupchat._run_input_guardrails(speaker, messages_for_reply)

                 if guardrails_reply is not None:
                     # if a guardrail has been activated, then the next target has been set and the guardrail reply will be sent
@@ -1266,7 +1325,7 @@ def run_chat(
                     reply = guardrails_reply
                 else:
                     # let the speaker speak
-                    reply = speaker.generate_reply(sender=self)
+                    reply = speaker.generate_reply(messages=messages_for_reply, sender=self)
             except KeyboardInterrupt:
                 # let the admin agent speak if interrupted
                 if groupchat.admin_name in groupchat.agent_names:
@@ -1345,8 +1404,9 @@ async def a_run_chat(
         if send_introductions:
             # Broadcast the intro
             intro = groupchat.introductions_msg()
-            for agent in groupchat.agents:
-                await self.a_send(intro, agent, request_reply=False, silent=True)
+            if not groupchat.isolate_agent_views:
+                for agent in groupchat.agents:
+                    await self.a_send(intro, agent, request_reply=False, silent=True)
             # NOTE: We do not also append to groupchat.messages,
             # since groupchat handles its own introductions

@@ -1363,10 +1423,24 @@ async def a_run_chat(
                 termination_reason = f"Termination message condition on the GroupChatManager '{self.name}' met"
                 break

-            # broadcast the message to all agents except the speaker
-            for agent in groupchat.agents:
-                if agent != speaker:
-                    await self.a_send(message, agent, request_reply=False, silent=True)
+            # broadcast the message to all agents except the speaker (unless isolate_agent_views is True)
+            if not groupchat.isolate_agent_views:
+                for agent in groupchat.agents:
+                    if agent != speaker:
+                        inter_reply = groupchat._run_inter_agent_guardrails(
+                            src_agent_name=speaker.name,
+                            dst_agent_name=agent.name,
+                            message_content=message,
+                        )
+                        if inter_reply is not None:
+                            replacement = (
+                                {"content": inter_reply, "name": speaker.name}
+                                if not isinstance(inter_reply, dict)
+                                else inter_reply
+                            )
+                            await self.a_send(replacement, agent, request_reply=False, silent=True)
+                        else:
+                            await self.a_send(message, agent, request_reply=False, silent=True)

             if i == groupchat.max_round - 1:
                 # the last round
                 termination_reason = f"Maximum rounds ({groupchat.max_round}) reached"
@@ -1377,8 +1451,60 @@ async def a_run_chat(
                 if not silent:
                     iostream.send(GroupChatRunChatEvent(speaker=speaker, silent=silent))

+                # If isolate_agent_views is True, build a temporary message list for the speaker
+                # that includes their own messages plus the last message, without persisting it.
+                # This allows the speaker to generate a reply while maintaining isolation.
+                if groupchat.isolate_agent_views:
+                    # Get the speaker's own messages (their history with the manager)
+                    speaker_own_messages = speaker._oai_messages.get(self, [])
+                    # Build temporary message list: speaker's own messages + last message
+                    # This gives context without adding the last message to persistent history
+                    should_add_message = False
+                    if message:
+                        # Check if the message is a tool response
+                        is_tool_response = message.get("role") == "tool" or message.get("tool_responses")
+                        if is_tool_response:
+                            # Only add tool responses if the speaker has the corresponding tool_call
+                            # in their history (i.e., this tool response belongs to them)
+                            msg_tool_call_ids = set()
+                            for resp in message.get("tool_responses", []):
+                                if resp.get("tool_call_id"):
+                                    msg_tool_call_ids.add(resp["tool_call_id"])
+                            if message.get("tool_call_id"):
+                                msg_tool_call_ids.add(message["tool_call_id"])
+
+                            # Check if speaker has tool_calls with matching IDs
+                            speaker_tool_call_ids = set()
+                            for hist_msg in speaker_own_messages:
+                                for tc in hist_msg.get("tool_calls", []):
+                                    if tc.get("id"):
+                                        speaker_tool_call_ids.add(tc["id"])
+
+                            # Only include if any of the tool response IDs match speaker's tool calls
+                            # and it's not already in their history
+                            if msg_tool_call_ids & speaker_tool_call_ids:
+                                # Check if not already in history
+                                already_in_history = False
+                                if speaker_own_messages:
+                                    last_in_history = speaker_own_messages[-1]
+                                    if last_in_history.get("tool_responses"):
+                                        hist_ids = {
+                                            r.get("tool_call_id") for r in last_in_history.get("tool_responses", [])
+                                        }
+                                        already_in_history = msg_tool_call_ids and msg_tool_call_ids == hist_ids
+                                should_add_message = not already_in_history
+                        else:
+                            # Not a tool response - always add it
+                            should_add_message = True
+
+                    temp_messages = speaker_own_messages + [message] if should_add_message else speaker_own_messages
+                    messages_for_reply = temp_messages
+                else:
+                    # Normal flow: use speaker's persistent message history
+                    messages_for_reply = speaker._oai_messages[self]
+
                 guardrails_activated = False
-                guardrails_reply = groupchat._run_input_guardrails(speaker, speaker._oai_messages[self])
+                guardrails_reply = groupchat._run_input_guardrails(speaker, messages_for_reply)

                 if guardrails_reply is not None:
                     # if a guardrail has been activated, then the next target has been set and the guardrail reply will be sent
@@ -1386,7 +1512,7 @@ async def a_run_chat(
                     reply = guardrails_reply
                 else:
                     # let the speaker speak
-                    reply = await speaker.a_generate_reply(sender=self)
+                    reply = await speaker.a_generate_reply(messages=messages_for_reply, sender=self)
             except KeyboardInterrupt:
                 # let the admin agent speak if interrupted
                 if groupchat.admin_name in groupchat.agent_names:
diff --git a/test/agentchat/group/patterns/test_pattern.py b/test/agentchat/group/patterns/test_pattern.py
index da3bb0a1ed2..aca8220d970 100644
--- a/test/agentchat/group/patterns/test_pattern.py
+++ b/test/agentchat/group/patterns/test_pattern.py
@@ -25,6 +25,7 @@ def prepare_group_chat(
         self,
         max_rounds: int,
         messages: list[dict[str, Any]] | str,
+        isolate_agent_views: bool = False,
     ) -> tuple[
         list["ConversableAgent"],
         list["ConversableAgent"],
@@ -41,7 +42,7 @@ def prepare_group_chat(
         list[Any],
     ]:
         """Concrete implementation that just calls the parent method."""
-        return super().prepare_group_chat(max_rounds, messages)
+        return super().prepare_group_chat(max_rounds, messages, isolate_agent_views)


 class TestPattern:
@@ -200,7 +201,7 @@ def test_prepare_group_chat(
         )

         # Call the method
-        result = pattern.prepare_group_chat(max_rounds=10, messages="Hello")
+        result = pattern.prepare_group_chat(max_rounds=10, messages="Hello", isolate_agent_views=False)

         # Check method calls
         mock_prepare_agents.assert_called_once_with(agents, context_variables, True)
diff --git a/test/agentchat/test_groupchat.py b/test/agentchat/test_groupchat.py
index 29fa4875664..16b6ef6cd35 100755
--- a/test/agentchat/test_groupchat.py
+++ b/test/agentchat/test_groupchat.py
@@ -586,6 +586,236 @@ def test_send_intros():
     assert messages[1]["content"] == agent1._default_auto_reply


+def test_isolate_agent_views(monkeypatch: MonkeyPatch):
+    """Test that when isolate_agent_views=True, agents only see their own messages."""
+    agent1 = autogen.ConversableAgent(
+        "alice",
+        description="The first agent.",
+        max_consecutive_auto_reply=10,
+        human_input_mode="NEVER",
+        llm_config=False,
+        default_auto_reply="This is alice speaking.",
+    )
+    agent2 = autogen.ConversableAgent(
+        "bob",
+        description="The second agent.",
+        max_consecutive_auto_reply=10,
+        human_input_mode="NEVER",
+        llm_config=False,
+        default_auto_reply="This is bob speaking.",
+    )
+    agent3 = autogen.ConversableAgent(
+        "sam",
+        description="The third agent.",
+        max_consecutive_auto_reply=10,
+        human_input_mode="NEVER",
+        llm_config=False,
+        default_auto_reply="This is sam speaking.",
+    )
+
+    # Mock speaker selection so it doesn't require a GroupChatManager with an LLM
+    monkeypatch.setattr(GroupChat, "_auto_select_speaker", lambda *args, **kwargs: agent2)
+
+    # Test with isolate_agent_views=True
+    groupchat = autogen.GroupChat(
+        agents=[agent1, agent2, agent3],
+        messages=[],
+        speaker_selection_method="round_robin",
+        max_round=3,
+        isolate_agent_views=True,
+    )
+
+    group_chat_manager = autogen.GroupChatManager(
+        groupchat=groupchat,
+        llm_config=False,
+    )
+
+    agent1.initiate_chat(group_chat_manager, message="Hello from alice")
+
+    # Verify groupchat.messages contains all messages (manager's view)
+    assert len(groupchat.messages) == 3
+    assert groupchat.messages[0]["name"] == "alice"
+    assert groupchat.messages[1]["name"] == "bob"
+    assert groupchat.messages[2]["name"] == "sam"
+
+    # Verify agent1 only sees its own messages (not messages from agent2 or agent3)
+    # With isolate_agent_views=True, agent1 should not see messages broadcast from agent2 or agent3
+    agent1_messages = agent1.chat_messages[group_chat_manager]
+    # Agent1 sends initial message, then gets selected again and sends reply
+    # It should NOT see agent2's or agent3's messages
+    assert len(agent1_messages) == 2  # Initial message + alice's reply
+    assert agent1_messages[0]["content"] == "Hello from alice"
+    assert agent1_messages[1]["content"] == "This is alice speaking."
+    # Verify agent1 doesn't see bob's or sam's messages
+    assert not any(msg.get("name") == "bob" for msg in agent1_messages)
+    assert not any(msg.get("name") == "sam" for msg in agent1_messages)
+
+    # Verify agent2 only sees its own messages (not messages from agent1 or agent3)
+    agent2_messages = agent2.chat_messages[group_chat_manager]
+    # Agent2 should only see its own message, not agent1's or agent3's
+    assert len(agent2_messages) == 1  # Only bob's own message
+    assert agent2_messages[0]["content"] == "This is bob speaking."
+    assert agent2_messages[0]["name"] == "bob"
+    # Verify agent2 doesn't see alice's or sam's messages
+    assert not any(msg.get("name") == "alice" for msg in agent2_messages)
+    assert not any(msg.get("name") == "sam" for msg in agent2_messages)
+
+    # Verify agent3 only sees its own messages (not messages from agent1 or agent2)
+    agent3_messages = agent3.chat_messages[group_chat_manager]
+    # Agent3 should only see its own message, not agent1's or agent2's
+    assert len(agent3_messages) == 1  # Only sam's own message
+    assert agent3_messages[0]["content"] == "This is sam speaking."
+    assert agent3_messages[0]["name"] == "sam"
+    # Verify agent3 doesn't see alice's or bob's messages
+    assert not any(msg.get("name") == "alice" for msg in agent3_messages)
+    assert not any(msg.get("name") == "bob" for msg in agent3_messages)
+
+    # Reset agents for next test
+    agent1.reset()
+    agent2.reset()
+    agent3.reset()
+
+    # Test with isolate_agent_views=True and send_introductions=True
+    groupchat2 = autogen.GroupChat(
+        agents=[agent1, agent2, agent3],
+        messages=[],
+        speaker_selection_method="round_robin",
+        max_round=2,
+        isolate_agent_views=True,
+        send_introductions=True,
+    )
+
+    group_chat_manager2 = autogen.GroupChatManager(
+        groupchat=groupchat2,
+        llm_config=False,
+        is_termination_msg=lambda x: x.get("content", "").rstrip().find("TERMINATE") >= 0,
+    )
+
+    group_chat_manager2.initiate_chat(group_chat_manager2, message="The initiating message.")
+
+    # Verify introductions were NOT sent to agents when isolate_agent_views=True
+    for a in [agent1, agent2, agent3]:
+        messages = a.chat_messages[group_chat_manager2]
+        # Should only have the initiating message and their own reply, no intro
+        assert len(messages) == 2
+        assert messages[0]["content"] == "The initiating message."
+        # Each agent should only see their own reply
+        if a == agent1:
+            assert messages[1]["content"] == agent1._default_auto_reply
+        elif a == agent2:
+            assert messages[1]["content"] == agent2._default_auto_reply
+        else:
+            assert messages[1]["content"] == agent3._default_auto_reply
+
+    # Reset agents for comparison test
+    agent1.reset()
+    agent2.reset()
+    agent3.reset()
+
+    # Test with isolate_agent_views=False (default) for comparison
+    groupchat3 = autogen.GroupChat(
+        agents=[agent1, agent2, agent3],
+        messages=[],
+        speaker_selection_method="round_robin",
+        max_round=2,
+        isolate_agent_views=False,
+    )
+
+    group_chat_manager3 = autogen.GroupChatManager(
+        groupchat=groupchat3,
+        llm_config=False,
+        is_termination_msg=lambda x: x.get("content", "").rstrip().find("TERMINATE") >= 0,
+    )
+
+    agent1.initiate_chat(group_chat_manager3, message="Hello from alice")
+
+    # Verify that with isolate_agent_views=False, agents DO see messages from other agents
+    agent1_messages = agent1.chat_messages[group_chat_manager3]
+    # Should see: initial message, alice's reply, bob's message
+    assert len(agent1_messages) >= 2
+    assert agent1_messages[0]["content"] == "Hello from alice"
+    # Should see bob's message (broadcast)
+    assert any(msg.get("name") == "bob" for msg in agent1_messages)
+
+
+@pytest.mark.asyncio
+async def test_isolate_agent_views_async():
+    """Test that when isolate_agent_views=True, agents only see their own messages in async mode."""
+    agent1 = autogen.ConversableAgent(
+        "alice",
+        description="The first agent.",
+        max_consecutive_auto_reply=10,
+        human_input_mode="NEVER",
+        llm_config=False,
+        default_auto_reply="This is alice speaking.",
+    )
+    agent2 = autogen.ConversableAgent(
+        "bob",
+        description="The second agent.",
+        max_consecutive_auto_reply=10,
+        human_input_mode="NEVER",
+        llm_config=False,
+        default_auto_reply="This is bob speaking.",
+    )
+    agent3 = autogen.ConversableAgent(
+        "sam",
+        description="The third agent.",
+        max_consecutive_auto_reply=10,
+        human_input_mode="NEVER",
+        llm_config=False,
+        default_auto_reply="This is sam speaking.",
+    )
+
+    # Test with isolate_agent_views=True using round_robin (doesn't need LLM)
+    groupchat = autogen.GroupChat(
+        agents=[agent1, agent2, agent3],
+        messages=[],
+        speaker_selection_method="round_robin",
+        max_round=3,
+        isolate_agent_views=True,
+    )
+
+    group_chat_manager = autogen.GroupChatManager(
+        groupchat=groupchat,
+        llm_config=False,
+    )
+
+    await agent1.a_initiate_chat(group_chat_manager, message="Hello from alice")
+
+    # Verify groupchat.messages contains all messages (manager's view)
+    assert len(groupchat.messages) == 3
+    assert groupchat.messages[0]["name"] == "alice"
+    assert groupchat.messages[1]["name"] == "bob"
+    assert groupchat.messages[2]["name"] == "sam"
+
+    # Verify agent1 only sees its own messages (not messages from agent2 or agent3)
+    agent1_messages = agent1.chat_messages[group_chat_manager]
+    assert len(agent1_messages) == 2  # Initial message + alice's reply
+    assert agent1_messages[0]["content"] == "Hello from alice"
+    assert agent1_messages[1]["content"] == "This is alice speaking."
+    # Verify agent1 doesn't see bob's or sam's messages
+    assert not any(msg.get("name") == "bob" for msg in agent1_messages)
+    assert not any(msg.get("name") == "sam" for msg in agent1_messages)
+
+    # Verify agent2 only sees its own messages (not messages from agent1 or agent3)
+    agent2_messages = agent2.chat_messages[group_chat_manager]
+    assert len(agent2_messages) == 1  # Only bob's own message
+    assert agent2_messages[0]["content"] == "This is bob speaking."
+    assert agent2_messages[0]["name"] == "bob"
+    # Verify agent2 doesn't see alice's or sam's messages
+    assert not any(msg.get("name") == "alice" for msg in agent2_messages)
+    assert not any(msg.get("name") == "sam" for msg in agent2_messages)
+
+    # Verify agent3 only sees its own messages (not messages from agent1 or agent2)
+    agent3_messages = agent3.chat_messages[group_chat_manager]
+    assert len(agent3_messages) == 1  # Only sam's own message
+    assert agent3_messages[0]["content"] == "This is sam speaking."
+    assert agent3_messages[0]["name"] == "sam"
+    # Verify agent3 doesn't see alice's or bob's messages
+    assert not any(msg.get("name") == "alice" for msg in agent3_messages)
+    assert not any(msg.get("name") == "bob" for msg in agent3_messages)
+
+
 def test_selection_helpers():
     agent1 = autogen.ConversableAgent(
         "alice",
diff --git a/website/docs/user-guide/advanced-concepts/orchestration/group-chat/introduction.mdx b/website/docs/user-guide/advanced-concepts/orchestration/group-chat/introduction.mdx
index be738dcc477..387b268fe7a 100644
--- a/website/docs/user-guide/advanced-concepts/orchestration/group-chat/introduction.mdx
+++ b/website/docs/user-guide/advanced-concepts/orchestration/group-chat/introduction.mdx
@@ -169,7 +169,8 @@ Here's the complete, ready-to-run code for our enhanced financial compliance sys
 result, context, last_agent = initiate_group_chat(
     pattern=pattern,
     messages="My laptop keeps shutting down randomly. Can you help?",
-    max_rounds=10
+    max_rounds=10,
+    isolate_agent_views=False  # Optional: If True, agents only see their own messages
 )
 ```

@@ -259,6 +260,28 @@ Replying as user. Provide feedback to chat_manager. Press enter to skip and use
 >>>>>>>> TERMINATING RUN (1353f5d3-ce7c-46e4-a713-b05729b682bb): No reply generated
 ```

+## Isolating Agent Views
+
+By default, all agents in a group chat receive all messages from other agents, allowing them to see the full conversation context. However, you can enable **isolated agent views** by setting `isolate_agent_views=True` when calling `initiate_group_chat()` or `run_group_chat()`.
+
+When `isolate_agent_views=True`:
+- **Agents only see their own messages**: Each agent maintains only their own message history and does not receive messages broadcast from other agents
+- **Messages are still stored centrally**: All messages are still stored in `groupchat.messages` for the GroupChatManager's view and coordination
+- **Useful for privacy**: Agents can work independently without seeing each other's messages, for example when they must maintain separate contexts or when privacy between agents matters
+
+Example:
+
+```python
+result, context, last_agent = initiate_group_chat(
+    pattern=pattern,
+    messages="My laptop keeps shutting down randomly. Can you help?",
+    max_rounds=10,
+    isolate_agent_views=True  # Agents will only see their own messages
+)
+```
+
+Note: When using isolated views, agents won't have access to the full conversation context, which may impact their ability to respond appropriately. Use this feature when you specifically need agent isolation.
+
 ## Next Steps

 Now that you understand the basics of Group Chat, the following sections will dive deeper into:
diff --git a/website/docs/user-guide/basic-concepts/introducing-group-chat.mdx b/website/docs/user-guide/basic-concepts/introducing-group-chat.mdx
index 1b28c374469..f57848063a8 100644
--- a/website/docs/user-guide/basic-concepts/introducing-group-chat.mdx
+++ b/website/docs/user-guide/basic-concepts/introducing-group-chat.mdx
@@ -78,6 +78,7 @@ pattern = AutoPattern(
 result, context_variables, last_agent = initiate_group_chat(
     pattern=pattern,
     messages="Initial request",  # Starting message
+    isolate_agent_views=False,  # Optional: If True, agents only see their own messages
 )
 ```

@@ -730,6 +731,27 @@ You are trained on data up to October 2023.
 Notice that the conversation automatically ends after the summary is generated, without requiring the human to type "exit".

+## Isolating Agent Views
+
+By default, all agents in a group chat receive all messages from other agents, allowing them to see the full conversation context. However, you can enable **isolated agent views** by setting `isolate_agent_views=True` when calling `initiate_group_chat()` or `run_group_chat()`.
+
+When `isolate_agent_views=True`:
+- **Agents only see their own messages**: Each agent maintains only their own message history and does not receive messages broadcast from other agents
+- **Messages are still stored centrally**: All messages are still stored in `groupchat.messages` for the GroupChatManager's view and coordination
+- **Useful for privacy**: Agents can work independently without seeing each other's messages, for example when they must maintain separate contexts or when privacy between agents matters
+
+Example:
+
+```python
+result, _, _ = initiate_group_chat(
+    pattern=pattern,
+    messages=initial_prompt,
+    isolate_agent_views=True  # Agents will only see their own messages
+)
+```
+
+Note: When using isolated views, agents won't have access to the full conversation context, which may impact their ability to respond appropriately. Use this feature when you specifically need agent isolation.
+
 ## Next Steps: Extending the System

 Our financial compliance system demonstrates the power of the group chat pattern, but we've only scratched the surface. In our next section, we'll explore how to enhance our system with specialized tools.
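
A minimal end-to-end sketch of the new flag, modeled on the unit tests in this diff. It assumes only the public API the diff touches (`autogen.GroupChat`, `autogen.GroupChatManager`, `ConversableAgent` with `llm_config=False`); the agent names and the final `print` checks are illustrative, not part of the change:

```python
import autogen

# Two no-LLM agents so the sketch runs without model credentials.
alice = autogen.ConversableAgent(
    "alice",
    human_input_mode="NEVER",
    llm_config=False,
    default_auto_reply="This is alice speaking.",
)
bob = autogen.ConversableAgent(
    "bob",
    human_input_mode="NEVER",
    llm_config=False,
    default_auto_reply="This is bob speaking.",
)

groupchat = autogen.GroupChat(
    agents=[alice, bob],
    messages=[],
    speaker_selection_method="round_robin",
    max_round=3,
    isolate_agent_views=True,  # the flag introduced by this diff
)
manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=False)

alice.initiate_chat(manager, message="Hello from alice")

# The manager's central transcript still records every turn...
print([m["name"] for m in groupchat.messages])
# ...while each agent's private view holds only its own side of the chat,
# since nothing is broadcast when views are isolated.
print([m.get("content") for m in bob.chat_messages[manager]])
```

Per the tests above, `groupchat.messages` should list every speaker in order, while `bob.chat_messages[manager]` should contain only bob's own messages, never alice's.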