Skip to content

Commit 75435ec

Browse files
committed
Gracefully handle empty outputs
1 parent 2d42617 commit 75435ec

File tree

2 files changed

+40
-1
lines changed

src/guardrails/checks/text/llm_base.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -386,8 +386,10 @@ async def run_llm(
386386

387387
result = response.choices[0].message.content
388388
if not result:
389+
# Use base LLMOutput for empty responses to avoid validation errors
390+
# with extended models that have required fields (e.g., LLMReasoningOutput)
389391
return (
390-
output_model(
392+
LLMOutput(
391393
flagged=False,
392394
confidence=0.0,
393395
),

tests/unit/checks/test_llm_base.py

Lines changed: 37 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -301,3 +301,40 @@ async def fake_run_llm(
301301
assert "reason" not in result.info # noqa: S101
302302
assert result.info["flagged"] is True # noqa: S101
303303
assert result.info["confidence"] == 0.8 # noqa: S101
304+
305+
306+
@pytest.mark.asyncio
307+
async def test_run_llm_handles_empty_response_with_reasoning_output(monkeypatch: pytest.MonkeyPatch) -> None:
308+
"""When response content is empty, should return base LLMOutput even if output_model is LLMReasoningOutput."""
309+
from types import SimpleNamespace
310+
311+
from guardrails.checks.text.llm_base import LLMReasoningOutput, run_llm
312+
313+
# Mock response with empty content
314+
mock_response = SimpleNamespace(
315+
choices=[SimpleNamespace(message=SimpleNamespace(content=""))],
316+
usage=SimpleNamespace(prompt_tokens=10, completion_tokens=0, total_tokens=10),
317+
)
318+
319+
async def fake_request_chat_completion(**kwargs: Any) -> Any: # noqa: ARG001
320+
return mock_response
321+
322+
monkeypatch.setattr(llm_base, "_request_chat_completion", fake_request_chat_completion)
323+
324+
# Call run_llm with LLMReasoningOutput (which requires a reason field)
325+
result, token_usage = await run_llm(
326+
text="test input",
327+
system_prompt="test prompt",
328+
client=SimpleNamespace(), # type: ignore[arg-type]
329+
model="gpt-test",
330+
output_model=LLMReasoningOutput,
331+
)
332+
333+
# Should return LLMOutput (not LLMReasoningOutput) to avoid validation error
334+
assert isinstance(result, LLMOutput) # noqa: S101
335+
assert result.flagged is False # noqa: S101
336+
assert result.confidence == 0.0 # noqa: S101
337+
# Should NOT have a reason field since we returned base LLMOutput
338+
assert not hasattr(result, "reason") or not hasattr(result, "__dict__") or "reason" not in result.__dict__ # noqa: S101
339+
assert token_usage.prompt_tokens == 10 # noqa: S101
340+
assert token_usage.completion_tokens == 0 # noqa: S101

0 commit comments

Comments
 (0)