diff --git a/src/agents/models/openai_responses.py b/src/agents/models/openai_responses.py
index a7ce62983..10a8702c8 100644
--- a/src/agents/models/openai_responses.py
+++ b/src/agents/models/openai_responses.py
@@ -242,6 +242,7 @@ async def _fetch_response(
             else NOT_GIVEN
         )
 
+        system_instructions = self._non_null_or_not_given(system_instructions)
         tool_choice = Converter.convert_tool_choice(model_settings.tool_choice)
         converted_tools = Converter.convert_tools(tools, handoffs)
         response_format = Converter.get_response_format(output_schema)
@@ -255,6 +256,7 @@ async def _fetch_response(
         else:
             logger.debug(
                 f"Calling LLM {self.model} with input:\n"
+                f"System instructions: {system_instructions}\n"
                 f"{json.dumps(list_input, indent=2, ensure_ascii=False)}\n"
                 f"Tools:\n{json.dumps(converted_tools.tools, indent=2, ensure_ascii=False)}\n"
                 f"Stream: {stream}\n"
@@ -265,7 +267,7 @@ async def _fetch_response(
 
         return await self._client.responses.create(
            previous_response_id=self._non_null_or_not_given(previous_response_id),
-            instructions=self._non_null_or_not_given(system_instructions),
+            instructions=system_instructions,
             model=self.model,
             input=list_input,
             include=include,
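
Note: the first hunk hoists the None-to-NOT_GIVEN conversion of system_instructions above the debug logging, so the value written to the log is exactly the value later passed to responses.create(); the third hunk then passes the already-converted variable directly. Below is a minimal sketch of that pattern, assuming the private helper _non_null_or_not_given simply maps None to the OpenAI client's NOT_GIVEN sentinel; the standalone function name and the print call are illustrative only, not part of the change.

    from openai import NOT_GIVEN  # sentinel the OpenAI client treats as "parameter omitted"

    def non_null_or_not_given(value):
        # Assumed behavior of the private helper: pass values through, but map
        # None to NOT_GIVEN so the client drops the parameter instead of
        # sending an explicit null.
        return value if value is not None else NOT_GIVEN

    # Convert once, up front, so the logged value matches what is sent.
    system_instructions = non_null_or_not_given(None)
    print(f"System instructions: {system_instructions}")  # logs the NOT_GIVEN sentinel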