Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
58 changes: 35 additions & 23 deletions python/packages/core/agent_framework/openai/_chat_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -359,35 +359,47 @@ def _prepare_chat_history_for_request(
# region Parsers

def _openai_chat_message_parser(self, message: ChatMessage) -> list[dict[str, Any]]:
"""Parse a chat message into the openai format."""
all_messages: list[dict[str, Any]] = []
for content in message.contents:
# Skip approval content - it's internal framework state, not for the LLM
if isinstance(content, (FunctionApprovalRequestContent, FunctionApprovalResponseContent)):
continue
msg: dict[str, Any] = {
"role": message.role.value if isinstance(message.role, Role) else message.role,
}
if message.additional_properties:
msg["metadata"] = message.additional_properties

args: dict[str, Any] = {
"role": message.role.value if isinstance(message.role, Role) else message.role,
}
for content in message.contents:
match content:
case FunctionCallContent():
if all_messages and "tool_calls" in all_messages[-1]:
# If the last message already has tool calls, append to it
all_messages[-1]["tool_calls"].append(self._openai_content_parser(content))
else:
args["tool_calls"] = [self._openai_content_parser(content)] # type: ignore
if "tool_calls" not in msg:
msg["tool_calls"] = []
msg["tool_calls"].append(self._openai_content_parser(content))
case FunctionResultContent():
args["tool_call_id"] = content.call_id
# FunctionResultContent requires a separate message with role=tool
# Handle both result and exception cases
if content.result is not None:
args["content"] = prepare_function_call_results(content.result)
result_content = prepare_function_call_results(content.result)
elif content.exception is not None:
# Send the exception message to the model
result_content = "Error: " + str(content.exception)
else:
result_content = ""

return [
{
"role": "tool",
"tool_call_id": content.call_id,
"content": result_content,
}
]
case FunctionApprovalRequestContent() | FunctionApprovalResponseContent():
# These need special handling - add them as content
if "content" not in msg:
msg["content"] = []
msg["content"].append(self._openai_content_parser(content))
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This will add a dict based on our class to the list, which I don't think makes sense. Could you 1) add unit test cases covering this, and 2) add integration tests that exercise this path? This is used with MCP server-side tool calling, so the output format should be determined by OpenAI, not by our type.

case _:
if "content" not in args:
args["content"] = []
# this is a list to allow multi-modal content
args["content"].append(self._openai_content_parser(content)) # type: ignore
if "content" in args or "tool_calls" in args:
all_messages.append(args)
return all_messages
if "content" not in msg:
msg["content"] = []
msg["content"].append(self._openai_content_parser(content))

return [msg] if ("content" in msg or "tool_calls" in msg) else []

def _openai_content_parser(self, content: Contents) -> dict[str, Any]:
"""Parse contents into the openai format."""
Expand Down
76 changes: 53 additions & 23 deletions python/packages/core/agent_framework/openai/_responses_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -479,36 +479,58 @@ def _prepare_chat_messages_for_request(self, chat_messages: Sequence[ChatMessage
# Flatten the list of lists into a single list
return list(chain.from_iterable(list_of_list))

def _openai_chat_message_parser(
self,
message: ChatMessage,
call_id_to_id: dict[str, str],
) -> list[dict[str, Any]]:
"""Parse a chat message into the openai format."""
def _openai_chat_message_parser(self, message: ChatMessage, callidtoid: dict[str, str]) -> list[dict[str, Any]]:
"""Parse a chat message into the openai Responses format.

Returns a list because some content types (like approval responses) need to be
top-level items separate from the message structure.
"""
all_messages: list[dict[str, Any]] = []
args: dict[str, Any] = {
msg: dict[str, Any] = {
"role": message.role.value if isinstance(message.role, Role) else message.role,
}
if message.additional_properties:
msg["metadata"] = message.additional_properties

for content in message.contents:
match content:
case TextReasoningContent():
# Don't send reasoning content back to model
continue
case FunctionResultContent():
new_args: dict[str, Any] = {}
new_args.update(self._openai_content_parser(message.role, content, call_id_to_id))
all_messages.append(new_args)
case FunctionApprovalRequestContent():
# Approval requests are top-level items in the Responses API
all_messages.append(self._openai_content_parser(message.role, content, callidtoid))
case FunctionApprovalResponseContent():
# Approval responses are top-level items in the Responses API
all_messages.append(self._openai_content_parser(message.role, content, callidtoid))
case FunctionCallContent():
function_call = self._openai_content_parser(message.role, content, call_id_to_id)
all_messages.append(function_call) # type: ignore
case FunctionApprovalResponseContent() | FunctionApprovalRequestContent():
all_messages.append(self._openai_content_parser(message.role, content, call_id_to_id)) # type: ignore
if "tool_calls" not in msg:
msg["tool_calls"] = []
msg["tool_calls"].append(self._openai_content_parser(message.role, content, callidtoid))
case FunctionResultContent():
# FunctionResultContent requires separate message with role=tool
# Handle both result and exception cases
if content.result is not None:
result_content = prepare_function_call_results(content.result)
elif content.exception is not None:
# Send the exception message to the model
result_content = "Error: " + str(content.exception)
else:
result_content = ""

return [
{
"role": "tool",
"tool_call_id": content.call_id,
"content": result_content,
}
]
case _:
if "content" not in args:
args["content"] = []
args["content"].append(self._openai_content_parser(message.role, content, call_id_to_id)) # type: ignore
if "content" in args or "tool_calls" in args:
all_messages.append(args)
if "content" not in msg:
msg["content"] = []
msg["content"].append(self._openai_content_parser(message.role, content, callidtoid))

# Only add the main message if it has content or tool_calls
if "content" in msg or "tool_calls" in msg:
all_messages.append(msg)

return all_messages

def _openai_content_parser(
Expand Down Expand Up @@ -597,6 +619,14 @@ def _openai_content_parser(
"type": "function_call_output",
"output": prepare_function_call_results(content.result),
}
# Handle both result and exception cases
if content.result is not None:
args["output"] = prepare_function_call_results(content.result)
elif content.exception is not None:
# Send the exception message to the model
args["output"] = "Error: " + str(content.exception)
else:
args["output"] = ""
return args
case FunctionApprovalRequestContent():
return {
Expand Down
179 changes: 179 additions & 0 deletions python/packages/core/tests/openai/test_openai_chat_message_parser.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,179 @@
# Copyright (c) Microsoft. All rights reserved.

from agent_framework import (
ChatMessage,
FunctionCallContent,
Role,
TextContent,
)
from agent_framework.openai import OpenAIChatClient


class TestOpenAIChatMessageParser:
    """Unit tests for ``OpenAIChatClient._openai_chat_message_parser``."""

    @staticmethod
    def _make_client() -> OpenAIChatClient:
        # The parser never touches the network, so a dummy API key is enough.
        return OpenAIChatClient(model_id="gpt-4", api_key="test-key")

    def test_chat_message_with_text_and_tool_calls_combined(self):
        """A message holding text plus a tool call parses into a SINGLE dict
        carrying both 'content' and 'tool_calls'.

        Regression check for issue #2410, where such messages were incorrectly
        split into multiple entries.
        """
        client = self._make_client()
        message = ChatMessage(
            role=Role.ASSISTANT,
            contents=[
                TextContent(text="I'll help you with that calculation."),
                FunctionCallContent(call_id="call-123", name="calculate", arguments={"x": 5, "y": 3}),
            ],
        )

        parsed = client._openai_chat_message_parser(message)

        assert len(parsed) == 1, "Should return exactly one message, not split into multiple"
        msg = parsed[0]

        # Role survives the conversion.
        assert msg["role"] == "assistant"

        # Both fields live on the SAME message object.
        assert "content" in msg, "Message should contain 'content' field"
        assert "tool_calls" in msg, "Message should contain 'tool_calls' field"

        # Content is the multi-modal list form with one text part.
        content = msg["content"]
        assert isinstance(content, list)
        assert len(content) == 1
        assert content[0]["type"] == "text"
        assert content[0]["text"] == "I'll help you with that calculation."

        # The single tool call is fully formed.
        calls = msg["tool_calls"]
        assert isinstance(calls, list)
        assert len(calls) == 1
        first_call = calls[0]
        assert first_call["id"] == "call-123"
        assert first_call["type"] == "function"
        assert first_call["function"]["name"] == "calculate"

    def test_chat_message_with_multiple_tool_calls_and_text(self):
        """Text plus several tool calls must all land in one parsed message."""
        client = self._make_client()
        message = ChatMessage(
            role=Role.ASSISTANT,
            contents=[
                TextContent(text="Let me check both databases for you."),
                FunctionCallContent(
                    call_id="call-456", name="query_database_a", arguments={"query": "SELECT * FROM users"}
                ),
                FunctionCallContent(
                    call_id="call-789", name="query_database_b", arguments={"query": "SELECT * FROM products"}
                ),
            ],
        )

        parsed = client._openai_chat_message_parser(message)

        assert len(parsed) == 1, "Should return exactly one message with all tool calls"
        msg = parsed[0]
        assert msg["role"] == "assistant"
        assert "content" in msg
        assert "tool_calls" in msg
        # Both calls appear, in submission order.
        assert [call["id"] for call in msg["tool_calls"]] == ["call-456", "call-789"]

    def test_chat_message_with_only_text(self):
        """A plain text message parses cleanly and grows no 'tool_calls' field."""
        client = self._make_client()
        message = ChatMessage(role=Role.USER, contents=[TextContent(text="Hello, how are you?")])

        parsed = client._openai_chat_message_parser(message)

        assert len(parsed) == 1
        msg = parsed[0]
        assert msg["role"] == "user"
        assert "content" in msg
        assert "tool_calls" not in msg

    def test_chat_message_with_only_tool_calls(self):
        """A tool-call-only message parses cleanly and grows no 'content' field."""
        client = self._make_client()
        message = ChatMessage(
            role=Role.ASSISTANT,
            contents=[
                FunctionCallContent(call_id="call-999", name="get_weather", arguments={"location": "San Francisco"})
            ],
        )

        parsed = client._openai_chat_message_parser(message)

        assert len(parsed) == 1
        msg = parsed[0]
        assert msg["role"] == "assistant"
        assert "tool_calls" in msg
        assert "content" not in msg

    def test_openai_api_compatibility(self):
        """The parsed shape matches the OpenAI API specification.

        Per the OpenAI docs, a single assistant message may legitimately carry
        both 'content' and 'tool_calls' in the same message object.
        """
        client = self._make_client()
        message = ChatMessage(
            role=Role.ASSISTANT,
            contents=[
                TextContent(text="I found the weather information."),
                FunctionCallContent(
                    call_id="call_abc123", name="get_weather", arguments='{"location": "New York", "unit": "celsius"}'
                ),
            ],
        )

        parsed = client._openai_chat_message_parser(message)

        assert len(parsed) == 1
        api_message = parsed[0]

        # Top-level structure expected by the API.
        for required in ("role", "content", "tool_calls"):
            assert required in api_message

        # Tool-call structure per the OpenAI function-calling spec.
        tool_call = api_message["tool_calls"][0]
        assert "id" in tool_call
        assert "type" in tool_call
        assert tool_call["type"] == "function"
        assert "function" in tool_call
        assert "name" in tool_call["function"]
        assert "arguments" in tool_call["function"]
Loading