-
Notifications
You must be signed in to change notification settings - Fork 1.2k
Python: Fix OpenAI Responses API 400 error on multi-turn conversations with previous_response_id #3800
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Python: Fix OpenAI Responses API 400 error on multi-turn conversations with previous_response_id #3800
Changes from all commits
2d3c177
7dc4503
3b9dc74
dc505af
0e6af11
7f90856
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change | ||||||||||||||||||||||||||
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
|
|
@@ -541,11 +541,15 @@ async def _prepare_options( | |||||||||||||||||||||||||||
| } | ||||||||||||||||||||||||||||
| run_options: dict[str, Any] = {k: v for k, v in options.items() if k not in exclude_keys and v is not None} | ||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||
| # Determine conversation ID early to inform message preparation | ||||||||||||||||||||||||||||
| conversation_id = self._get_current_conversation_id(options, **kwargs) | ||||||||||||||||||||||||||||
| is_continuation = bool(conversation_id and conversation_id.startswith("resp_")) | ||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||
| # messages | ||||||||||||||||||||||||||||
| # Handle instructions by prepending to messages as system message | ||||||||||||||||||||||||||||
| if instructions := options.get("instructions"): | ||||||||||||||||||||||||||||
| messages = prepend_instructions_to_messages(list(messages), instructions, role="system") | ||||||||||||||||||||||||||||
| request_input = self._prepare_messages_for_openai(messages) | ||||||||||||||||||||||||||||
| request_input = self._prepare_messages_for_openai(messages, filter_for_continuation=is_continuation) | ||||||||||||||||||||||||||||
| if not request_input: | ||||||||||||||||||||||||||||
| raise ServiceInvalidRequestError("Messages are required for chat completions") | ||||||||||||||||||||||||||||
| run_options["input"] = request_input | ||||||||||||||||||||||||||||
|
|
@@ -565,7 +569,7 @@ async def _prepare_options( | |||||||||||||||||||||||||||
| run_options[new_key] = run_options.pop(old_key) | ||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||
| # Handle different conversation ID formats | ||||||||||||||||||||||||||||
| if conversation_id := self._get_current_conversation_id(options, **kwargs): | ||||||||||||||||||||||||||||
| if conversation_id: | ||||||||||||||||||||||||||||
| if conversation_id.startswith("resp_"): | ||||||||||||||||||||||||||||
| # For response IDs, set previous_response_id and remove conversation property | ||||||||||||||||||||||||||||
| run_options["previous_response_id"] = conversation_id | ||||||||||||||||||||||||||||
|
|
@@ -626,7 +630,9 @@ def _get_current_conversation_id(self, options: Mapping[str, Any], **kwargs: Any | |||||||||||||||||||||||||||
| """ | ||||||||||||||||||||||||||||
| return kwargs.get("conversation_id") or options.get("conversation_id") | ||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||
| def _prepare_messages_for_openai(self, chat_messages: Sequence[ChatMessage]) -> list[dict[str, Any]]: | ||||||||||||||||||||||||||||
| def _prepare_messages_for_openai( | ||||||||||||||||||||||||||||
| self, chat_messages: Sequence[ChatMessage], filter_for_continuation: bool = False | ||||||||||||||||||||||||||||
| ) -> list[dict[str, Any]]: | ||||||||||||||||||||||||||||
| """Prepare the chat messages for a request. | ||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||
| Allowing customization of the key names for role/author, and optionally overriding the role. | ||||||||||||||||||||||||||||
|
|
@@ -635,10 +641,16 @@ def _prepare_messages_for_openai(self, chat_messages: Sequence[ChatMessage]) -> | |||||||||||||||||||||||||||
| They require a "tool_call_id" and (function) "name" key, and the "metadata" key should | ||||||||||||||||||||||||||||
| be removed. The "encoding" key should also be removed. | ||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||
| When using previous_response_id for conversation continuation, the Responses API expects | ||||||||||||||||||||||||||||
| only NEW user messages (and system/developer messages), not the full conversation history. | ||||||||||||||||||||||||||||
| Assistant messages and function results are already stored server-side. | ||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||
| Override this method to customize the formatting of the chat history for a request. | ||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||
| Args: | ||||||||||||||||||||||||||||
| chat_messages: The chat history to prepare. | ||||||||||||||||||||||||||||
| filter_for_continuation: If True, filter out assistant messages and function results | ||||||||||||||||||||||||||||
| for continuation with previous_response_id. | ||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||
| Returns: | ||||||||||||||||||||||||||||
| The prepared chat messages for a request. | ||||||||||||||||||||||||||||
|
|
@@ -652,6 +664,27 @@ def _prepare_messages_for_openai(self, chat_messages: Sequence[ChatMessage]) -> | |||||||||||||||||||||||||||
| and "fc_id" in content.additional_properties | ||||||||||||||||||||||||||||
| ): | ||||||||||||||||||||||||||||
| call_id_to_id[content.call_id] = content.additional_properties["fc_id"] # type: ignore[attr-defined, index] | ||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||
| # Filter messages if continuing a conversation with previous_response_id | ||||||||||||||||||||||||||||
| if filter_for_continuation: | ||||||||||||||||||||||||||||
| # Find the last assistant message index | ||||||||||||||||||||||||||||
| last_assistant_idx = -1 | ||||||||||||||||||||||||||||
| for idx, message in enumerate(chat_messages): | ||||||||||||||||||||||||||||
| if message.role == "assistant": | ||||||||||||||||||||||||||||
| last_assistant_idx = idx | ||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||
| # When using previous_response_id, filter out assistant and function result messages | ||||||||||||||||||||||||||||
| # but keep system/developer/user messages (the API accepts these roles) | ||||||||||||||||||||||||||||
| if last_assistant_idx >= 0: | ||||||||||||||||||||||||||||
| # Collect system/developer messages from before the last assistant | ||||||||||||||||||||||||||||
| system_messages = [ | ||||||||||||||||||||||||||||
| msg for msg in chat_messages[:last_assistant_idx] if msg.role in ("system", "developer") | ||||||||||||||||||||||||||||
| ] | ||||||||||||||||||||||||||||
| # Get all messages after the last assistant (new user messages) | ||||||||||||||||||||||||||||
| new_messages = chat_messages[last_assistant_idx + 1 :] | ||||||||||||||||||||||||||||
| # Combine: system messages + new messages | ||||||||||||||||||||||||||||
| chat_messages = system_messages + list(new_messages) | ||||||||||||||||||||||||||||
|
Comment on lines
+683
to
+686
|
||||||||||||||||||||||||||||
| # Get all messages after the last assistant (new user messages) | |
| new_messages = chat_messages[last_assistant_idx + 1 :] | |
| # Combine: system messages + new messages | |
| chat_messages = system_messages + list(new_messages) | |
| # Get all messages after the last assistant, but keep only supported roles | |
| # (system/developer/user) for continuation. | |
| new_messages = [ | |
| msg | |
| for msg in chat_messages[last_assistant_idx + 1 :] | |
| if msg.role in ("system", "developer", "user") | |
| ] | |
| # Combine: system messages + filtered new messages | |
| chat_messages = system_messages + new_messages |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -2105,6 +2105,88 @@ async def test_conversation_id_precedence_kwargs_over_options() -> None: | |
| assert "conversation" not in run_opts | ||
|
|
||
|
|
||
| async def test_message_filtering_with_previous_response_id() -> None: | ||
| """Test that assistant messages are filtered when using previous_response_id.""" | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Missing coverage for function result messages. The docstring on the production code explicitly states function results should be filtered. Add a test with ChatMessage(role='tool', ...) or function-result messages in the conversation history to verify they are excluded when using previous_response_id. |
||
| client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") | ||
|
|
||
| # Create a multi-turn conversation with history | ||
| messages = [ | ||
| ChatMessage(role="system", text="You are a helpful assistant"), | ||
| ChatMessage(role="user", text="My name is Alice"), | ||
| ChatMessage(role="assistant", text="Nice to meet you, Alice!"), | ||
| ChatMessage(role="user", text="What's my name?"), | ||
| ] | ||
|
|
||
| # When using previous_response_id, assistant messages should be filtered but system messages preserved | ||
| options = await client._prepare_options( | ||
| messages, | ||
| {"conversation_id": "resp_12345"}, # Using resp_ prefix | ||
| ) # type: ignore | ||
|
|
||
| # Should include: system message + last user message | ||
| assert "input" in options | ||
| input_messages = options["input"] | ||
| assert len(input_messages) == 2, f"Expected 2 messages (system + user), got {len(input_messages)}" | ||
| assert input_messages[0]["role"] == "system" | ||
| assert input_messages[1]["role"] == "user" | ||
| assert "What's my name?" in str(input_messages[1]) | ||
|
|
||
| # Verify previous_response_id is set | ||
| assert options["previous_response_id"] == "resp_12345" | ||
|
|
||
|
|
||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This test only includes a single assistant message. The production code specifically tracks the last assistant message index via a loop. Add a test with multiple assistant/user turns (e.g., system → user → assistant → user → assistant → user) to verify that only messages after the final assistant are kept and that earlier user messages are correctly dropped. |
||
| async def test_message_filtering_without_previous_response_id() -> None: | ||
| """Test that all messages are included when NOT using previous_response_id.""" | ||
| client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") | ||
|
|
||
| # Same conversation as above | ||
| messages = [ | ||
| ChatMessage(role="system", text="You are a helpful assistant"), | ||
| ChatMessage(role="user", text="My name is Alice"), | ||
| ChatMessage(role="assistant", text="Nice to meet you, Alice!"), | ||
| ChatMessage(role="user", text="What's my name?"), | ||
| ] | ||
|
|
||
| # Without conversation_id, all messages should be included | ||
| options = await client._prepare_options(messages, {}) # type: ignore | ||
|
|
||
| # Should include all messages | ||
| assert "input" in options | ||
| input_messages = options["input"] | ||
| # System (1) + User (1) + Assistant (1) + User (1) = 4 messages | ||
| assert len(input_messages) == 4 | ||
|
|
||
| # Verify previous_response_id is NOT set | ||
| assert "previous_response_id" not in options | ||
|
|
||
|
|
||
| async def test_message_filtering_with_conv_prefix() -> None: | ||
| """Test that messages are NOT filtered when using conv_ prefix (conversation ID).""" | ||
| client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") | ||
|
|
||
| messages = [ | ||
| ChatMessage(role="system", text="You are a helpful assistant"), | ||
| ChatMessage(role="user", text="My name is Alice"), | ||
| ChatMessage(role="assistant", text="Nice to meet you, Alice!"), | ||
| ChatMessage(role="user", text="What's my name?"), | ||
| ] | ||
|
|
||
| # When using conv_ prefix, should use conversation parameter, not previous_response_id | ||
| options = await client._prepare_options( | ||
| messages, | ||
| {"conversation_id": "conv_abc123"}, # Using conv_ prefix | ||
| ) # type: ignore | ||
|
|
||
| # All messages should be included (no filtering for conversation IDs) | ||
| assert "input" in options | ||
| input_messages = options["input"] | ||
| assert len(input_messages) == 4 | ||
|
|
||
| # Verify conversation is set, not previous_response_id | ||
| assert options.get("conversation") == "conv_abc123" | ||
| assert "previous_response_id" not in options | ||
|
|
||
|
Comment on lines
+2108
to
+2188
|
||
|
|
||
| def test_with_callable_api_key() -> None: | ||
| """Test OpenAIResponsesClient initialization with callable API key.""" | ||
|
|
||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The developer role is handled here but no test verifies it. Consider adding a test with a developer-role message to ensure it is preserved during continuation filtering.