Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
68 changes: 55 additions & 13 deletions python/packages/core/agent_framework/openai/_responses_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -788,6 +788,9 @@ async def _prepare_options(
request_input = self._prepare_messages_for_openai(messages)
if not request_input:
raise ServiceInvalidRequestError("Messages are required for chat completions")

conversation_id = self._get_current_conversation_id(options, **kwargs)

run_options["input"] = request_input

# model id
Expand Down Expand Up @@ -911,8 +914,11 @@ def _prepare_message_for_openai(
for content in message.contents:
match content.type:
case "text_reasoning":
# Don't send reasoning content back to model
continue
# Reasoning items must be sent back as top-level input items
# for reasoning models that require them alongside function_calls
reasoning = self._prepare_content_for_openai(message.role, content, call_id_to_id) # type: ignore[arg-type]
if reasoning:
all_messages.append(reasoning)
case "function_result":
new_args: dict[str, Any] = {}
new_args.update(self._prepare_content_for_openai(message.role, content, call_id_to_id)) # type: ignore[arg-type]
Expand Down Expand Up @@ -967,6 +973,8 @@ def _prepare_content_for_openai(
}
props: dict[str, Any] | None = getattr(content, "additional_properties", None)
if props:
if reasoning_id := props.get("reasoning_id"):
ret["id"] = reasoning_id
if status := props.get("status"):
ret["status"] = status
if reasoning_text := props.get("reasoning_text"):
Expand Down Expand Up @@ -1184,22 +1192,29 @@ def _parse_response_from_openai(
)
)
case "reasoning": # ResponseOutputReasoning
reasoning_id = getattr(item, "id", None)
if hasattr(item, "content") and item.content:
for index, reasoning_content in enumerate(item.content):
additional_properties = None
additional_properties: dict[str, Any] = {}
if reasoning_id:
additional_properties["reasoning_id"] = reasoning_id
if hasattr(item, "summary") and item.summary and index < len(item.summary):
additional_properties = {"summary": item.summary[index]}
additional_properties["summary"] = item.summary[index]
contents.append(
Content.from_text_reasoning(
text=reasoning_content.text,
raw_representation=reasoning_content,
additional_properties=additional_properties,
additional_properties=additional_properties or None,
)
)
if hasattr(item, "summary") and item.summary:
for summary in item.summary:
contents.append(
Content.from_text_reasoning(text=summary.text, raw_representation=summary) # type: ignore[arg-type]
Content.from_text_reasoning(
text=summary.text,
raw_representation=summary, # type: ignore[arg-type]
additional_properties={"reasoning_id": reasoning_id} if reasoning_id else None,
)
)
case "code_interpreter_call": # ResponseOutputCodeInterpreterCall
call_id = getattr(item, "call_id", None) or getattr(item, "id", None)
Expand Down Expand Up @@ -1413,16 +1428,40 @@ def _parse_chunk_from_openai(
contents.append(Content.from_text(text=event.delta, raw_representation=event))
metadata.update(self._get_metadata_from_response(event))
case "response.reasoning_text.delta":
contents.append(Content.from_text_reasoning(text=event.delta, raw_representation=event))
contents.append(
Content.from_text_reasoning(
text=event.delta,
raw_representation=event,
additional_properties={"reasoning_id": event.item_id},
)
)
metadata.update(self._get_metadata_from_response(event))
case "response.reasoning_text.done":
contents.append(Content.from_text_reasoning(text=event.text, raw_representation=event))
contents.append(
Content.from_text_reasoning(
text=event.text,
raw_representation=event,
additional_properties={"reasoning_id": event.item_id},
)
)
metadata.update(self._get_metadata_from_response(event))
case "response.reasoning_summary_text.delta":
contents.append(Content.from_text_reasoning(text=event.delta, raw_representation=event))
contents.append(
Content.from_text_reasoning(
text=event.delta,
raw_representation=event,
additional_properties={"reasoning_id": event.item_id},
)
)
metadata.update(self._get_metadata_from_response(event))
case "response.reasoning_summary_text.done":
contents.append(Content.from_text_reasoning(text=event.text, raw_representation=event))
contents.append(
Content.from_text_reasoning(
text=event.text,
raw_representation=event,
additional_properties={"reasoning_id": event.item_id},
)
)
metadata.update(self._get_metadata_from_response(event))
case "response.code_interpreter_call_code.delta":
call_id = getattr(event, "call_id", None) or getattr(event, "id", None) or event.item_id
Expand Down Expand Up @@ -1593,20 +1632,23 @@ def _parse_chunk_from_openai(
)
)
case "reasoning": # ResponseOutputReasoning
reasoning_id = getattr(event_item, "id", None)
if hasattr(event_item, "content") and event_item.content:
for index, reasoning_content in enumerate(event_item.content):
additional_properties = None
additional_properties: dict[str, Any] = {}
if reasoning_id:
additional_properties["reasoning_id"] = reasoning_id
if (
hasattr(event_item, "summary")
and event_item.summary
and index < len(event_item.summary)
):
additional_properties = {"summary": event_item.summary[index]}
additional_properties["summary"] = event_item.summary[index]
contents.append(
Content.from_text_reasoning(
text=reasoning_content.text,
raw_representation=reasoning_content,
additional_properties=additional_properties,
additional_properties=additional_properties or None,
)
)
case _:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -129,9 +129,7 @@ def test_azure_assistants_client_init_validation_fail() -> None:
def test_azure_assistants_client_init_missing_deployment_name(azure_openai_unit_test_env: dict[str, str]) -> None:
    """Test AzureOpenAIAssistantsClient initialization with missing deployment name.

    The fixture excludes the deployment-name variable, so construction must
    raise ServiceInitializationError even when an API key is supplied.
    """
    with pytest.raises(ServiceInitializationError):
        AzureOpenAIAssistantsClient(api_key=azure_openai_unit_test_env.get("AZURE_OPENAI_API_KEY", "test-key"))


def test_azure_assistants_client_init_with_default_headers(azure_openai_unit_test_env: dict[str, str]) -> None:
Expand Down
6 changes: 2 additions & 4 deletions python/packages/core/tests/azure/test_azure_chat_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -94,15 +94,13 @@ def test_init_endpoint(azure_openai_unit_test_env: dict[str, str]) -> None:
@pytest.mark.parametrize("exclude_list", [["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"]], indirect=True)
def test_init_with_empty_deployment_name(azure_openai_unit_test_env: dict[str, str]) -> None:
    """AzureOpenAIChatClient must fail to initialize when the chat deployment name is absent."""
    with pytest.raises(ServiceInitializationError):
        AzureOpenAIChatClient()


@pytest.mark.parametrize("exclude_list", [["AZURE_OPENAI_ENDPOINT", "AZURE_OPENAI_BASE_URL"]], indirect=True)
def test_init_with_empty_endpoint_and_base_url(azure_openai_unit_test_env: dict[str, str]) -> None:
    """AzureOpenAIChatClient must fail to initialize when both endpoint and base URL are absent."""
    with pytest.raises(ServiceInitializationError):
        AzureOpenAIChatClient()


@pytest.mark.parametrize("override_env_param_dict", [{"AZURE_OPENAI_ENDPOINT": "http://test.com"}], indirect=True)
Expand Down
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
# Copyright (c) Microsoft. All rights reserved.

import json
import logging
import os
from typing import Annotated, Any
from unittest.mock import MagicMock
Expand Down Expand Up @@ -30,6 +31,8 @@
else "Integration tests are disabled.",
)

logger = logging.getLogger(__name__)


class OutputStruct(BaseModel):
"""A structured output for testing purposes."""
Expand Down Expand Up @@ -111,8 +114,7 @@ def test_init_with_default_header(azure_openai_unit_test_env: dict[str, str]) ->
@pytest.mark.parametrize("exclude_list", [["AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME"]], indirect=True)
def test_init_with_empty_model_id(azure_openai_unit_test_env: dict[str, str]) -> None:
    """AzureOpenAIResponsesClient must fail to initialize when the responses deployment name is absent."""
    with pytest.raises(ServiceInitializationError):
        AzureOpenAIResponsesClient()


def test_init_with_project_client(azure_openai_unit_test_env: dict[str, str]) -> None:
Expand Down
71 changes: 71 additions & 0 deletions python/packages/core/tests/core/test_function_invocation_logic.py
Original file line number Diff line number Diff line change
Expand Up @@ -2700,3 +2700,74 @@ def test_func(arg1: str) -> str:
assert conversation_ids_received[1] == "stream_conv_after_first", (
"streaming: conversation_id should be updated in options after receiving new conversation_id from API"
)


async def test_streaming_function_calling_response_includes_reasoning_and_tool_results(
    chat_client_base: SupportsChatGetResponse,
):
    """Test that the finalized streaming response includes reasoning, function_call,
    function_result, and final text in its messages.

    This is critical for workflow chaining: when one agent's response is passed as
    input to the next agent, the conversation must include all items (reasoning,
    function_call, function_call_output) so the API can validate the history.
    """

    # Minimal tool whose result is deterministic; approval_mode="never_require"
    # lets the client invoke it without a human-in-the-loop step.
    @tool(name="search", approval_mode="never_require")
    def search_func(query: str) -> str:
        return f"Found results for {query}"

    # Script two streamed turns: the first yields reasoning then a function call,
    # the second (after the tool runs) yields the final text answer.
    # NOTE(review): assumes the chat_client_base fixture consumes
    # streaming_responses in order, one list per model round trip — confirm
    # against the fixture's implementation.
    chat_client_base.streaming_responses = [
        [
            # First response: reasoning + function_call
            ChatResponseUpdate(
                contents=[
                    Content.from_text_reasoning(
                        text="Let me search for that",
                        # reasoning_id mirrors what the Responses client stores so it
                        # can be round-tripped back to the API as a top-level item.
                        additional_properties={"reasoning_id": "rs_test123", "status": "completed"},
                    )
                ],
                role="assistant",
            ),
            ChatResponseUpdate(
                contents=[
                    Content.from_function_call(
                        call_id="call_1",
                        name="search",
                        arguments='{"query": "test"}',
                        additional_properties={"fc_id": "fc_test456"},
                    )
                ],
                role="assistant",
            ),
        ],
        [
            # Second response: final text
            ChatResponseUpdate(
                contents=[Content.from_text(text="Here are the results")],
                role="assistant",
            ),
        ],
    ]

    stream = chat_client_base.get_response(
        "search for test", options={"tool_choice": "auto", "tools": [search_func]}, stream=True
    )

    # Drain the stream fully before asking for the finalized response.
    updates: list[ChatResponseUpdate] = []
    async for update in stream:
        updates.append(update)
    response = await stream.get_final_response()

    # Verify all content types are in the response messages
    all_content_types = [c.type for msg in response.messages for c in msg.contents]
    assert "text_reasoning" in all_content_types, "Reasoning must be preserved in response messages"
    assert "function_call" in all_content_types, "Function call must be preserved in response messages"
    assert "function_result" in all_content_types, "Function result must be in response messages for chaining"
    assert "text" in all_content_types, "Final text must be in response messages"

    # Verify reasoning has the reasoning_id preserved
    reasoning_contents = [c for msg in response.messages for c in msg.contents if c.type == "text_reasoning"]
    assert len(reasoning_contents) >= 1
    assert reasoning_contents[0].additional_properties is not None
    assert reasoning_contents[0].additional_properties.get("reasoning_id") == "rs_test123"
Original file line number Diff line number Diff line change
Expand Up @@ -154,9 +154,7 @@ def test_init_validation_fail() -> None:
def test_init_missing_model_id(openai_unit_test_env: dict[str, str]) -> None:
    """Test OpenAIAssistantsClient initialization with missing model ID.

    The fixture excludes the model-id variable, so construction must raise
    ServiceInitializationError even when an API key is supplied.
    """
    with pytest.raises(ServiceInitializationError):
        OpenAIAssistantsClient(api_key=openai_unit_test_env.get("OPENAI_API_KEY", "test-key"))


@pytest.mark.parametrize("exclude_list", [["OPENAI_API_KEY"]], indirect=True)
Expand Down
3 changes: 1 addition & 2 deletions python/packages/core/tests/openai/test_openai_chat_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -97,8 +97,7 @@ def test_init_base_url_from_settings_env() -> None:
@pytest.mark.parametrize("exclude_list", [["OPENAI_CHAT_MODEL_ID"]], indirect=True)
def test_init_with_empty_model_id(openai_unit_test_env: dict[str, str]) -> None:
    """OpenAIChatClient must fail to initialize when the chat model ID is absent."""
    with pytest.raises(ServiceInitializationError):
        OpenAIChatClient()


@pytest.mark.parametrize("exclude_list", [["OPENAI_API_KEY"]], indirect=True)
Expand Down
Loading