Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 11 additions & 1 deletion libs/langchain_v1/langchain/agents/middleware/tool_selection.py
Original file line number Diff line number Diff line change
Expand Up @@ -223,10 +223,20 @@ def _process_selection_response(
request: ModelRequest,
) -> ModelRequest:
"""Process the selection response and return filtered `ModelRequest`."""
# Validate response structure
tools_value = response.get("tools")
if tools_value is None or not isinstance(tools_value, list):
msg = (
"LLM returned invalid response for tool selection. "
f"Expected dict with 'tools' list, got: {response!r}. "
"Consider using a model with better structured output support."
)
raise ValueError(msg)

selected_tool_names: list[str] = []
invalid_tool_selections = []

for tool_name in response["tools"]:
for tool_name in tools_value:
if tool_name not in valid_tool_names:
invalid_tool_selections.append(tool_name)
continue
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -594,3 +594,69 @@ def test_empty_tools_list_raises_error(self) -> None:
"""Test that empty tools list raises an error in schema creation."""
with pytest.raises(AssertionError, match="tools must be non-empty"):
_create_tool_selection_response([])

def test_malformed_response_missing_tools_key(self) -> None:
    """Verify that a selector response lacking the 'tools' key raises ValueError."""
    # Fake selector model: always emits a ToolSelectionResponse tool call whose
    # args dict is empty, simulating an LLM that never populated 'tools'.
    selector_model = FakeModel(
        messages=cycle(
            [
                AIMessage(
                    content="",
                    tool_calls=[
                        {
                            "name": "ToolSelectionResponse",
                            "id": "1",
                            "args": {},  # 'tools' key deliberately absent
                        }
                    ],
                ),
            ]
        )
    )

    # Main agent model; never reached because selection fails first.
    main_model = FakeModel(messages=iter([AIMessage(content="Done")]))

    middleware = LLMToolSelectorMiddleware(max_tools=2, model=selector_model)
    agent = create_agent(
        model=main_model,
        tools=[get_weather, search_web],
        middleware=[middleware],
    )

    # The malformed selection response must surface as a ValueError.
    with pytest.raises(ValueError, match="LLM returned invalid response"):
        agent.invoke({"messages": [HumanMessage("test")]})

def test_malformed_response_null_tools(self) -> None:
    """Verify that a selector response with 'tools' set to null raises ValueError."""
    # Fake selector model: emits a ToolSelectionResponse whose 'tools' value is
    # None rather than a list, simulating a model with weak structured output.
    selector_model = FakeModel(
        messages=cycle(
            [
                AIMessage(
                    content="",
                    tool_calls=[
                        {
                            "name": "ToolSelectionResponse",
                            "id": "1",
                            "args": {"tools": None},  # explicit null value
                        }
                    ],
                ),
            ]
        )
    )

    # Main agent model; never reached because selection fails first.
    main_model = FakeModel(messages=iter([AIMessage(content="Done")]))

    middleware = LLMToolSelectorMiddleware(max_tools=2, model=selector_model)
    agent = create_agent(
        model=main_model,
        tools=[get_weather, search_web],
        middleware=[middleware],
    )

    # The null 'tools' value must surface as a ValueError.
    with pytest.raises(ValueError, match="LLM returned invalid response"):
        agent.invoke({"messages": [HumanMessage("test")]})