Skip to content

Commit 6745b79

Browse files
committed
going back to sequence-based approach
1 parent 4f33433 commit 6745b79

File tree

2 files changed

+38
-50
lines changed

2 files changed

+38
-50
lines changed

libs/langchain_v1/langchain/agents/middleware/human_in_the_loop.py

Lines changed: 13 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
"""Human in the loop middleware."""
22

3-
from typing import Any, Literal, cast
3+
from typing import Any, Literal
44

55
from langchain_core.messages import AIMessage, ToolCall, ToolMessage
66
from langgraph.types import interrupt
@@ -40,7 +40,6 @@ class HumanInterrupt(TypedDict):
4040
args: Arguments for the action (tool kwargs)
4141
config: Configuration defining what actions are allowed
4242
description: Optional detailed description of what input is needed
43-
tool_call_id: Identifier for the associated tool call
4443
4544
Example:
4645
# Send the interrupt request and get the response
@@ -54,7 +53,6 @@ class HumanInterrupt(TypedDict):
5453
allow_approve=True, # Allow direct acceptance
5554
),
5655
description="Please review the command before execution",
57-
tool_call_id="call_123",
5856
)
5957
# Send the interrupt request and get the response
6058
response = interrupt([request])[0]
@@ -65,37 +63,32 @@ class HumanInterrupt(TypedDict):
6563
args: dict
6664
config: HumanInterruptConfig
6765
description: str | None
68-
tool_call_id: str
6966

7067

7168
class ApprovePayload(TypedDict):
7269
"""Human chose to approve the current state without changes."""
7370

7471
type: Literal["approve"]
75-
tool_call_id: str
7672

7773

7874
class IgnorePayload(TypedDict):
7975
"""Human chose to ignore/skip the current step with optional tool message customization."""
8076

8177
type: Literal["ignore"]
82-
tool_call_id: str
8378
tool_message: NotRequired[str | ToolMessage]
8479

8580

8681
class ResponsePayload(TypedDict):
8782
"""Human provided text feedback or instructions."""
8883

8984
type: Literal["response"]
90-
tool_call_id: str
9185
tool_message: str | ToolMessage
9286

9387

9488
class EditPayload(TypedDict):
9589
"""Human chose to edit/modify the current state/content."""
9690

9791
type: Literal["edit"]
98-
tool_call_id: str
9992
action: str
10093
args: dict
10194

@@ -173,7 +166,8 @@ def after_model(self, state: AgentState) -> dict[str, Any] | None: # noqa: PLR0
173166

174167
# Create interrupt requests for all tools that need approval
175168
interrupt_requests: list[HumanInterrupt] = []
176-
for tool_call in interrupt_tool_calls.values():
169+
interrupt_tool_calls_list = list(interrupt_tool_calls.values())
170+
for tool_call in interrupt_tool_calls_list:
177171
tool_name = tool_call["name"]
178172
tool_args = tool_call["args"]
179173
description = f"{self.message_prefix}\n\nTool: {tool_name}\nArgs: {tool_args}"
@@ -184,22 +178,21 @@ def after_model(self, state: AgentState) -> dict[str, Any] | None: # noqa: PLR0
184178
"args": tool_args,
185179
"config": tool_config,
186180
"description": description,
187-
# ids should always be present on tool calls
188-
"tool_call_id": cast("str", tool_call["id"]),
189181
}
190182
interrupt_requests.append(request)
191183

192184
responses: list[HumanResponse] = interrupt(interrupt_requests)
193185

194-
for response in responses:
195-
try:
196-
tool_call = interrupt_tool_calls[response["tool_call_id"]]
197-
except KeyError:
198-
msg = (
199-
f"Unexpected human response: {response}. "
200-
f"Expected one with `'tool_call_id'` in {list(interrupt_tool_calls.keys())}."
201-
)
202-
raise ValueError(msg)
186+
# Validate that the number of responses matches the number of interrupt tool calls
187+
if len(responses) != len(interrupt_tool_calls_list):
188+
msg = (
189+
f"Number of human responses ({len(responses)}) does not match "
190+
f"number of hanging tool calls ({len(interrupt_tool_calls_list)})."
191+
)
192+
raise ValueError(msg)
193+
194+
for i, response in enumerate(responses):
195+
tool_call = interrupt_tool_calls_list[i]
203196

204197
tool_config = self.tool_configs[tool_call["name"]]
205198

libs/langchain_v1/tests/unit_tests/agents/test_middleware_agent.py

Lines changed: 25 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -416,7 +416,7 @@ def test_human_in_the_loop_middleware_single_tool_accept() -> None:
416416
state = {"messages": [HumanMessage(content="Hello"), ai_message]}
417417

418418
def mock_accept(requests):
419-
return [{"type": "approve", "args": None, "tool_call_id": "1"}]
419+
return [{"type": "approve"}]
420420

421421
with patch("langchain.agents.middleware.human_in_the_loop.interrupt", side_effect=mock_accept):
422422
result = middleware.after_model(state)
@@ -449,7 +449,6 @@ def mock_edit(requests):
449449
"type": "edit",
450450
"action": "test_tool",
451451
"args": {"input": "edited"},
452-
"tool_call_id": "1",
453452
}
454453
]
455454

@@ -479,7 +478,7 @@ def test_human_in_the_loop_middleware_single_tool_ignore() -> None:
479478
state = {"messages": [HumanMessage(content="Hello"), ai_message]}
480479

481480
def mock_ignore(requests):
482-
return [{"type": "ignore", "args": None, "tool_call_id": "1"}]
481+
return [{"type": "ignore"}]
483482

484483
with patch("langchain.agents.middleware.human_in_the_loop.interrupt", side_effect=mock_ignore):
485484
result = middleware.after_model(state)
@@ -511,7 +510,7 @@ def test_human_in_the_loop_middleware_single_tool_response() -> None:
511510
state = {"messages": [HumanMessage(content="Hello"), ai_message]}
512511

513512
def mock_response(requests):
514-
return [{"type": "response", "tool_message": "Custom response", "tool_call_id": "1"}]
513+
return [{"type": "response", "tool_message": "Custom response"}]
515514

516515
with patch(
517516
"langchain.agents.middleware.human_in_the_loop.interrupt", side_effect=mock_response
@@ -552,8 +551,8 @@ def test_human_in_the_loop_middleware_multiple_tools_mixed_responses() -> None:
552551

553552
def mock_mixed_responses(requests):
554553
return [
555-
{"type": "approve", "args": None, "tool_call_id": "1"},
556-
{"type": "ignore", "args": None, "tool_call_id": "2"},
554+
{"type": "approve"},
555+
{"type": "ignore"},
557556
]
558557

559558
with patch(
@@ -605,13 +604,11 @@ def mock_edit_responses(requests):
605604
"type": "edit",
606605
"action": "get_forecast",
607606
"args": {"location": "New York"},
608-
"tool_call_id": "1",
609607
},
610608
{
611609
"type": "edit",
612610
"action": "get_temperature",
613611
"args": {"location": "New York"},
614-
"tool_call_id": "2",
615612
},
616613
]
617614

@@ -657,12 +654,10 @@ def mock_response_responses(requests):
657654
{
658655
"type": "response",
659656
"tool_message": "actually, please get the conditions in NYC",
660-
"tool_call_id": "1",
661657
},
662658
{
663659
"type": "response",
664660
"tool_message": "actually, please get the temperature in NYC",
665-
"tool_call_id": "2",
666661
},
667662
]
668663

@@ -704,12 +699,12 @@ def test_human_in_the_loop_middleware_unknown_response_type() -> None:
704699
state = {"messages": [HumanMessage(content="Hello"), ai_message]}
705700

706701
def mock_unknown(requests):
707-
return [{"type": "unknown", "args": None, "tool_call_id": "1"}]
702+
return [{"type": "unknown"}]
708703

709704
with patch("langchain.agents.middleware.human_in_the_loop.interrupt", side_effect=mock_unknown):
710705
with pytest.raises(
711706
ValueError,
712-
match="Unexpected human response: {'type': 'unknown', 'args': None, 'tool_call_id': '1'}. Response type 'unknown' is not allowed for tool 'test_tool'. Expected one with `'type'` in \\['accept', 'edit', 'response', 'ignore'\\] based on the tool's interrupt configuration.",
707+
match="Unexpected human response: {'type': 'unknown'}. Response type 'unknown' is not allowed for tool 'test_tool'. Expected one with `'type'` in \\['accept', 'edit', 'response', 'ignore'\\] based on the tool's interrupt configuration.",
713708
):
714709
middleware.after_model(state)
715710

@@ -731,15 +726,15 @@ def test_human_in_the_loop_middleware_disallowed_response_type() -> None:
731726
state = {"messages": [HumanMessage(content="Hello"), ai_message]}
732727

733728
def mock_disallowed_response(requests):
734-
return [{"type": "response", "args": "Custom response", "tool_call_id": "1"}]
729+
return [{"type": "response", "tool_message": "Custom response"}]
735730

736731
with patch(
737732
"langchain.agents.middleware.human_in_the_loop.interrupt",
738733
side_effect=mock_disallowed_response,
739734
):
740735
with pytest.raises(
741736
ValueError,
742-
match="Unexpected human response: {'type': 'response', 'args': 'Custom response', 'tool_call_id': '1'}. Response type 'response' is not allowed for tool 'test_tool'. Expected one with `'type'` in \\['accept', 'ignore'\\] based on the tool's interrupt configuration.",
737+
match="Unexpected human response: {'type': 'response', 'tool_message': 'Custom response'}. Response type 'response' is not allowed for tool 'test_tool'. Expected one with `'type'` in \\['accept', 'ignore'\\] based on the tool's interrupt configuration.",
743738
):
744739
middleware.after_model(state)
745740

@@ -766,7 +761,6 @@ def mock_disallowed_edit(requests):
766761
"type": "edit",
767762
"action": "test_tool",
768763
"args": {"input": "edited"},
769-
"tool_call_id": "1",
770764
}
771765
]
772766

@@ -775,7 +769,7 @@ def mock_disallowed_edit(requests):
775769
):
776770
with pytest.raises(
777771
ValueError,
778-
match="Unexpected human response: {'type': 'edit', 'action': 'test_tool', 'args': {'input': 'edited'}, 'tool_call_id': '1'}. Response type 'edit' is not allowed for tool 'test_tool'. Expected one with `'type'` in \\['accept', 'response', 'ignore'\\] based on the tool's interrupt configuration.",
772+
match="Unexpected human response: {'type': 'edit', 'action': 'test_tool', 'args': {'input': 'edited'}}. Response type 'edit' is not allowed for tool 'test_tool'. Expected one with `'type'` in \\['accept', 'response', 'ignore'\\] based on the tool's interrupt configuration.",
779773
):
780774
middleware.after_model(state)
781775

@@ -800,7 +794,7 @@ def test_human_in_the_loop_middleware_mixed_auto_approved_and_interrupt() -> Non
800794
state = {"messages": [HumanMessage(content="Hello"), ai_message]}
801795

802796
def mock_accept(requests):
803-
return [{"type": "approve", "args": None, "tool_call_id": "2"}]
797+
return [{"type": "approve"}]
804798

805799
with patch("langchain.agents.middleware.human_in_the_loop.interrupt", side_effect=mock_accept):
806800
result = middleware.after_model(state)
@@ -839,8 +833,8 @@ def test_human_in_the_loop_middleware_all_ignored() -> None:
839833

840834
def mock_all_ignore(requests):
841835
return [
842-
{"type": "ignore", "args": None, "tool_call_id": "1"},
843-
{"type": "ignore", "args": None, "tool_call_id": "2"},
836+
{"type": "ignore"},
837+
{"type": "ignore"},
844838
]
845839

846840
with patch(
@@ -883,7 +877,7 @@ def test_human_in_the_loop_middleware_interrupt_request_structure() -> None:
883877

884878
def mock_capture_requests(requests):
885879
captured_requests.extend(requests)
886-
return [{"type": "approve", "args": None, "tool_call_id": "1"}]
880+
return [{"type": "approve"}]
887881

888882
with patch(
889883
"langchain.agents.middleware.human_in_the_loop.interrupt", side_effect=mock_capture_requests
@@ -919,7 +913,7 @@ def test_human_in_the_loop_middleware_boolean_configs() -> None:
919913
# Test approve
920914
with patch(
921915
"langchain.agents.middleware.human_in_the_loop.interrupt",
922-
return_value=[{"type": "approve", "tool_call_id": "1"}],
916+
return_value=[{"type": "approve"}],
923917
):
924918
result = middleware.after_model(state)
925919
assert result is not None
@@ -933,7 +927,6 @@ def test_human_in_the_loop_middleware_boolean_configs() -> None:
933927
return_value=[
934928
{
935929
"type": "edit",
936-
"tool_call_id": "1",
937930
"action": "test_tool",
938931
"args": {"input": "edited"},
939932
}
@@ -948,7 +941,7 @@ def test_human_in_the_loop_middleware_boolean_configs() -> None:
948941
# Test ignore
949942
with patch(
950943
"langchain.agents.middleware.human_in_the_loop.interrupt",
951-
return_value=[{"type": "ignore", "tool_call_id": "1"}],
944+
return_value=[{"type": "ignore"}],
952945
):
953946
result = middleware.after_model(state)
954947
assert result is not None
@@ -963,7 +956,7 @@ def test_human_in_the_loop_middleware_boolean_configs() -> None:
963956
# Test response
964957
with patch(
965958
"langchain.agents.middleware.human_in_the_loop.interrupt",
966-
return_value=[{"type": "response", "tool_call_id": "1", "tool_message": "Custom response"}],
959+
return_value=[{"type": "response", "tool_message": "Custom response"}],
967960
):
968961
result = middleware.after_model(state)
969962
assert result is not None
@@ -981,8 +974,8 @@ def test_human_in_the_loop_middleware_boolean_configs() -> None:
981974
assert result is None
982975

983976

984-
def test_human_in_the_loop_middleware_missing_tool_call_id() -> None:
985-
"""Test that missing tool call ID in resume raises an error."""
977+
def test_human_in_the_loop_middleware_sequence_mismatch() -> None:
978+
"""Test that sequence mismatch in resume raises an error."""
986979
middleware = HumanInTheLoopMiddleware(tool_configs={"test_tool": True})
987980

988981
ai_message = AIMessage(
@@ -991,23 +984,25 @@ def test_human_in_the_loop_middleware_missing_tool_call_id() -> None:
991984
)
992985
state = {"messages": [HumanMessage(content="Hello"), ai_message]}
993986

987+
# Test with too few responses
994988
with patch(
995989
"langchain.agents.middleware.human_in_the_loop.interrupt",
996-
return_value=[{"type": "approve"}], # Missing tool_call_id
990+
return_value=[], # No responses for 1 tool call
997991
):
998992
with pytest.raises(
999993
ValueError,
1000-
match=r"Unexpected human response: \{'type': 'approve'\}\. Expected one with `'tool_call_id'` in \['1'\]\.",
994+
match=r"Number of human responses \(0\) does not match number of hanging tool calls \(1\)\.",
1001995
):
1002996
middleware.after_model(state)
1003997

998+
# Test with too many responses
1004999
with patch(
10051000
"langchain.agents.middleware.human_in_the_loop.interrupt",
1006-
return_value=[{"type": "approve", "tool_call_id": "nonexistent"}],
1001+
return_value=[{"type": "approve"}, {"type": "approve"}], # 2 responses for 1 tool call
10071002
):
10081003
with pytest.raises(
10091004
ValueError,
1010-
match=r"Unexpected human response: \{'type': 'approve', 'tool_call_id': 'nonexistent'\}\. Expected one with `'tool_call_id'` in \['1'\]\.",
1005+
match=r"Number of human responses \(2\) does not match number of hanging tool calls \(1\)\.",
10111006
):
10121007
middleware.after_model(state)
10131008

0 commit comments

Comments (0)