@@ -301,3 +301,40 @@ async def fake_run_llm(
     assert "reason" not in result.info  # noqa: S101
     assert result.info["flagged"] is True  # noqa: S101
     assert result.info["confidence"] == 0.8  # noqa: S101
+
+
+@pytest.mark.asyncio
+async def test_run_llm_handles_empty_response_with_reasoning_output(monkeypatch: pytest.MonkeyPatch) -> None:
+    """When response content is empty, should return base LLMOutput even if output_model is LLMReasoningOutput."""
+    from types import SimpleNamespace
+
+    from guardrails.checks.text.llm_base import LLMReasoningOutput, run_llm
+
+    # Mock response with empty content
+    mock_response = SimpleNamespace(
+        choices=[SimpleNamespace(message=SimpleNamespace(content=""))],
+        usage=SimpleNamespace(prompt_tokens=10, completion_tokens=0, total_tokens=10),
+    )
+
+    async def fake_request_chat_completion(**kwargs: Any) -> Any:  # noqa: ARG001
+        return mock_response
+
+    monkeypatch.setattr(llm_base, "_request_chat_completion", fake_request_chat_completion)
+
+    # Call run_llm with LLMReasoningOutput (which requires a reason field)
+    result, token_usage = await run_llm(
+        text="test input",
+        system_prompt="test prompt",
+        client=SimpleNamespace(),  # type: ignore[arg-type]
+        model="gpt-test",
+        output_model=LLMReasoningOutput,
+    )
+
+    # Should return LLMOutput (not LLMReasoningOutput) to avoid validation error
+    assert isinstance(result, LLMOutput)  # noqa: S101
+    assert result.flagged is False  # noqa: S101
+    assert result.confidence == 0.0  # noqa: S101
+    # Should NOT have a reason field since we returned base LLMOutput
+    assert not hasattr(result, "reason") or not hasattr(result, "__dict__") or "reason" not in result.__dict__  # noqa: S101
+    assert token_usage.prompt_tokens == 10  # noqa: S101
+    assert token_usage.completion_tokens == 0  # noqa: S101