Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
39 changes: 36 additions & 3 deletions python/packages/core/agent_framework/openai/_responses_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -541,11 +541,15 @@ async def _prepare_options(
}
run_options: dict[str, Any] = {k: v for k, v in options.items() if k not in exclude_keys and v is not None}

# Determine conversation ID early to inform message preparation
conversation_id = self._get_current_conversation_id(options, **kwargs)
is_continuation = bool(conversation_id and conversation_id.startswith("resp_"))

# messages
# Handle instructions by prepending to messages as system message
if instructions := options.get("instructions"):
messages = prepend_instructions_to_messages(list(messages), instructions, role="system")
request_input = self._prepare_messages_for_openai(messages)
request_input = self._prepare_messages_for_openai(messages, filter_for_continuation=is_continuation)
if not request_input:
raise ServiceInvalidRequestError("Messages are required for chat completions")
run_options["input"] = request_input
Expand All @@ -565,7 +569,7 @@ async def _prepare_options(
run_options[new_key] = run_options.pop(old_key)

# Handle different conversation ID formats
if conversation_id := self._get_current_conversation_id(options, **kwargs):
if conversation_id:
if conversation_id.startswith("resp_"):
# For response IDs, set previous_response_id and remove conversation property
run_options["previous_response_id"] = conversation_id
Expand Down Expand Up @@ -626,7 +630,9 @@ def _get_current_conversation_id(self, options: Mapping[str, Any], **kwargs: Any
"""
return kwargs.get("conversation_id") or options.get("conversation_id")

def _prepare_messages_for_openai(self, chat_messages: Sequence[ChatMessage]) -> list[dict[str, Any]]:
def _prepare_messages_for_openai(
self, chat_messages: Sequence[ChatMessage], filter_for_continuation: bool = False
) -> list[dict[str, Any]]:
"""Prepare the chat messages for a request.

Allowing customization of the key names for role/author, and optionally overriding the role.
Expand All @@ -635,10 +641,16 @@ def _prepare_messages_for_openai(self, chat_messages: Sequence[ChatMessage]) ->
They require a "tool_call_id" and (function) "name" key, and the "metadata" key should
be removed. The "encoding" key should also be removed.

When using previous_response_id for conversation continuation, the Responses API expects
only NEW user messages (and system/developer messages), not the full conversation history.
Assistant messages and function results are already stored server-side.

Override this method to customize the formatting of the chat history for a request.

Args:
chat_messages: The chat history to prepare.
filter_for_continuation: If True, filter out assistant messages and function results
for continuation with previous_response_id.

Returns:
The prepared chat messages for a request.
Expand All @@ -652,6 +664,27 @@ def _prepare_messages_for_openai(self, chat_messages: Sequence[ChatMessage]) ->
and "fc_id" in content.additional_properties
):
call_id_to_id[content.call_id] = content.additional_properties["fc_id"] # type: ignore[attr-defined, index]

# Filter messages if continuing a conversation with previous_response_id
if filter_for_continuation:
# Find the last assistant message index
last_assistant_idx = -1
for idx, message in enumerate(chat_messages):
if message.role == "assistant":
last_assistant_idx = idx

# When using previous_response_id, filter out assistant and function result messages
# but keep system/developer/user messages (the API accepts these roles)
if last_assistant_idx >= 0:
# Collect system/developer messages from before the last assistant
system_messages = [
msg for msg in chat_messages[:last_assistant_idx] if msg.role in ("system", "developer")
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The developer role is handled here but no test verifies it. Consider adding a test with a developer-role message to ensure it is preserved during continuation filtering.

Suggested change
msg for msg in chat_messages[:last_assistant_idx] if msg.role in ("system", "developer")
msg for msg in chat_messages[:last_assistant_idx] if msg.role in ("system", "developer")

]
# Get all messages after the last assistant (new user messages)
new_messages = chat_messages[last_assistant_idx + 1 :]
# Combine: system messages + new messages
chat_messages = system_messages + list(new_messages)
Comment on lines +683 to +686
Copy link

Copilot AI Feb 10, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

In the continuation filtering path, new_messages = chat_messages[last_assistant_idx + 1:] is appended without restricting roles. If a tool message (e.g., function_result/tool output) exists after the last assistant turn (possible in tool-loop error/retry scenarios), it will be sent to the Responses API even though this block’s comment/docstring says assistant messages and function results should be filtered out. Consider explicitly filtering the retained messages to roles the Responses API accepts for previous_response_id continuation (e.g., keep system/developer plus only user messages after the last assistant; drop tool and any other roles).

Suggested change
# Get all messages after the last assistant (new user messages)
new_messages = chat_messages[last_assistant_idx + 1 :]
# Combine: system messages + new messages
chat_messages = system_messages + list(new_messages)
# Get all messages after the last assistant, but keep only supported roles
# (system/developer/user) for continuation.
new_messages = [
msg
for msg in chat_messages[last_assistant_idx + 1 :]
if msg.role in ("system", "developer", "user")
]
# Combine: system messages + filtered new messages
chat_messages = system_messages + new_messages

Copilot uses AI. Check for mistakes.

list_of_list = [self._prepare_message_for_openai(message, call_id_to_id) for message in chat_messages]
# Flatten the list of lists into a single list
return list(chain.from_iterable(list_of_list))
Expand Down
82 changes: 82 additions & 0 deletions python/packages/core/tests/openai/test_openai_responses_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -2105,6 +2105,88 @@ async def test_conversation_id_precedence_kwargs_over_options() -> None:
assert "conversation" not in run_opts


async def test_message_filtering_with_previous_response_id() -> None:
    """Test that assistant messages are filtered when using previous_response_id.

    When the conversation_id has the ``resp_`` prefix, the client should send only
    the system message(s) plus the messages after the last assistant turn, because
    the server already holds the earlier history.
    """
    client = OpenAIResponsesClient(model_id="test-model", api_key="test-key")

    # Create a multi-turn conversation with history
    messages = [
        ChatMessage(role="system", text="You are a helpful assistant"),
        ChatMessage(role="user", text="My name is Alice"),
        ChatMessage(role="assistant", text="Nice to meet you, Alice!"),
        ChatMessage(role="user", text="What's my name?"),
    ]

    # When using previous_response_id, assistant messages should be filtered
    # but system messages preserved.
    options = await client._prepare_options(
        messages,
        {"conversation_id": "resp_12345"},  # Using resp_ prefix
    )  # type: ignore

    # Should include: system message + last user message only.
    assert "input" in options
    input_messages = options["input"]
    assert len(input_messages) == 2, f"Expected 2 messages (system + user), got {len(input_messages)}"
    assert input_messages[0]["role"] == "system"
    assert input_messages[1]["role"] == "user"
    assert "What's my name?" in str(input_messages[1])

    # Verify previous_response_id is set
    assert options["previous_response_id"] == "resp_12345"


Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This test only includes a single assistant message. The production code specifically tracks the last assistant message index via a loop. Add a test with multiple assistant/user turns (e.g., system → user → assistant → user → assistant → user) to verify that only messages after the final assistant are kept and that earlier user messages are correctly dropped.

async def test_message_filtering_without_previous_response_id() -> None:
    """Verify the entire history is sent when no previous_response_id is used."""
    client = OpenAIResponsesClient(model_id="test-model", api_key="test-key")

    # Four-message history: system -> user -> assistant -> user.
    history = [
        ChatMessage(role="system", text="You are a helpful assistant"),
        ChatMessage(role="user", text="My name is Alice"),
        ChatMessage(role="assistant", text="Nice to meet you, Alice!"),
        ChatMessage(role="user", text="What's my name?"),
    ]

    # No conversation_id supplied, so no continuation filtering should occur.
    prepared = await client._prepare_options(history, {})  # type: ignore

    # Every one of the four messages must survive preparation.
    assert "input" in prepared
    sent = prepared["input"]
    assert len(sent) == 4

    # Continuation must not have been requested.
    assert "previous_response_id" not in prepared


async def test_message_filtering_with_conv_prefix() -> None:
    """Verify no filtering occurs for conv_-prefixed (conversation) IDs."""
    client = OpenAIResponsesClient(model_id="test-model", api_key="test-key")

    history = [
        ChatMessage(role="system", text="You are a helpful assistant"),
        ChatMessage(role="user", text="My name is Alice"),
        ChatMessage(role="assistant", text="Nice to meet you, Alice!"),
        ChatMessage(role="user", text="What's my name?"),
    ]

    # A conv_ prefix routes through the conversation parameter rather than
    # previous_response_id, so the full history must be preserved.
    prepared = await client._prepare_options(
        history,
        {"conversation_id": "conv_abc123"},
    )  # type: ignore

    # All four messages should still be present in the request input.
    assert "input" in prepared
    assert len(prepared["input"]) == 4

    # conversation is populated; previous_response_id must be absent.
    assert prepared.get("conversation") == "conv_abc123"
    assert "previous_response_id" not in prepared

Comment on lines +2108 to +2188
Copy link

Copilot AI Feb 10, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The new tests cover filtering assistant messages for resp_* continuation, but don’t cover the case where a tool message appears after the last assistant turn (which the client intends to filter out for previous_response_id). Adding a test that includes a ChatMessage(role="tool", ...) after the last assistant message and asserting it is excluded from options["input"] would better lock in the intended fix and prevent regressions.

Copilot uses AI. Check for mistakes.

def test_with_callable_api_key() -> None:
"""Test OpenAIResponsesClient initialization with callable API key."""

Expand Down
Loading