diff --git a/python/packages/core/agent_framework/openai/_responses_client.py b/python/packages/core/agent_framework/openai/_responses_client.py
index 5ab414dc85..55fdaeeda3 100644
--- a/python/packages/core/agent_framework/openai/_responses_client.py
+++ b/python/packages/core/agent_framework/openai/_responses_client.py
@@ -782,8 +782,12 @@ async def _prepare_options(
 
         # messages
         # Handle instructions by prepending to messages as system message
-        if instructions := options.get("instructions"):
+        # Only prepend instructions for the first turn (when no conversation/response ID exists)
+        conversation_id = self._get_current_conversation_id(options, **kwargs)
+        if (instructions := options.get("instructions")) and not conversation_id:
+            # First turn: prepend instructions as system message
             messages = prepend_instructions_to_messages(list(messages), instructions, role="system")
+        # Continuation turn: instructions already exist in conversation context, skip prepending
         request_input = self._prepare_messages_for_openai(messages)
         if not request_input:
             raise ServiceInvalidRequestError("Messages are required for chat completions")
diff --git a/python/packages/core/tests/openai/test_openai_responses_client.py b/python/packages/core/tests/openai/test_openai_responses_client.py
index a83c4a398b..749939783e 100644
--- a/python/packages/core/tests/openai/test_openai_responses_client.py
+++ b/python/packages/core/tests/openai/test_openai_responses_client.py
@@ -2168,6 +2168,90 @@ async def test_conversation_id_precedence_kwargs_over_options() -> None:
     assert "conversation" not in run_opts
 
 
+def _create_mock_responses_text_response(*, response_id: str) -> MagicMock:
+    mock_response = MagicMock()
+    mock_response.id = response_id
+    mock_response.model = "test-model"
+    mock_response.created_at = 1000000000
+    mock_response.output_parsed = None
+    mock_response.metadata = {}
+    mock_response.usage = None
+    mock_response.finish_reason = None
+
+    mock_message_content = MagicMock()
+    mock_message_content.type = "output_text"
+    mock_message_content.text = "Hello! How can I help?"
+    mock_message_content.annotations = []
+
+    mock_message_item = MagicMock()
+    mock_message_item.type = "message"
+    mock_message_item.content = [mock_message_content]
+
+    mock_response.output = [mock_message_item]
+    return mock_response
+
+
+async def test_instructions_sent_first_turn_then_skipped_for_continuation() -> None:
+    client = OpenAIResponsesClient(model_id="test-model", api_key="test-key")
+    mock_response = _create_mock_responses_text_response(response_id="resp_123")
+
+    with patch.object(client.client.responses, "create", return_value=mock_response) as mock_create:
+        await client.get_response(
+            messages=[Message(role="user", text="Hello")],
+            options={"instructions": "Reply in uppercase."},
+        )
+
+        first_input_messages = mock_create.call_args.kwargs["input"]
+        assert len(first_input_messages) == 2
+        assert first_input_messages[0]["role"] == "system"
+        assert any("Reply in uppercase" in str(c) for c in first_input_messages[0]["content"])
+        assert first_input_messages[1]["role"] == "user"
+
+        await client.get_response(
+            messages=[Message(role="user", text="Tell me a joke")],
+            options={"instructions": "Reply in uppercase.", "conversation_id": "resp_123"},
+        )
+
+        second_input_messages = mock_create.call_args.kwargs["input"]
+        assert len(second_input_messages) == 1
+        assert second_input_messages[0]["role"] == "user"
+        assert not any(message["role"] == "system" for message in second_input_messages)
+
+
+@pytest.mark.parametrize("conversation_id", ["resp_456", "conv_abc123"])
+async def test_instructions_not_repeated_for_continuation_ids(conversation_id: str) -> None:
+    client = OpenAIResponsesClient(model_id="test-model", api_key="test-key")
+    mock_response = _create_mock_responses_text_response(response_id="resp_456")
+
+    with patch.object(client.client.responses, "create", return_value=mock_response) as mock_create:
+        await client.get_response(
+            messages=[Message(role="user", text="Continue conversation")],
+            options={"instructions": "Be helpful.", "conversation_id": conversation_id},
+        )
+
+        input_messages = mock_create.call_args.kwargs["input"]
+        assert len(input_messages) == 1
+        assert input_messages[0]["role"] == "user"
+        assert not any(message["role"] == "system" for message in input_messages)
+
+
+async def test_instructions_included_without_conversation_id() -> None:
+    client = OpenAIResponsesClient(model_id="test-model", api_key="test-key")
+    mock_response = _create_mock_responses_text_response(response_id="resp_new")
+
+    with patch.object(client.client.responses, "create", return_value=mock_response) as mock_create:
+        await client.get_response(
+            messages=[Message(role="user", text="Hello")],
+            options={"instructions": "You are a helpful assistant."},
+        )
+
+        input_messages = mock_create.call_args.kwargs["input"]
+        assert len(input_messages) == 2
+        assert input_messages[0]["role"] == "system"
+        assert any("helpful assistant" in str(c) for c in input_messages[0]["content"])
+        assert input_messages[1]["role"] == "user"
+
+
 def test_with_callable_api_key() -> None:
     """Test OpenAIResponsesClient initialization with callable API key."""