diff --git a/python/CODING_STANDARD.md b/python/CODING_STANDARD.md
index 1b7b2726b8..d5f9d6f150 100644
--- a/python/CODING_STANDARD.md
+++ b/python/CODING_STANDARD.md
@@ -55,8 +55,8 @@ Prefer attributes over inheritance when parameters are mostly the same:
# ✅ Preferred - using attributes
from agent_framework import ChatMessage
-user_msg = ChatMessage(role="user", content="Hello, world!")
-asst_msg = ChatMessage(role="assistant", content="Hello, world!")
+user_msg = ChatMessage("user", ["Hello, world!"])
+asst_msg = ChatMessage("assistant", ["Hello, world!"])
# ❌ Not preferred - unnecessary inheritance
from agent_framework import UserMessage, AssistantMessage
diff --git a/python/README.md b/python/README.md
index 06eca19999..74d7052c12 100644
--- a/python/README.md
+++ b/python/README.md
@@ -113,8 +113,8 @@ async def main():
client = OpenAIChatClient()
messages = [
- ChatMessage(role="system", text="You are a helpful assistant."),
- ChatMessage(role="user", text="Write a haiku about Agent Framework.")
+ ChatMessage("system", ["You are a helpful assistant."]),
+ ChatMessage("user", ["Write a haiku about Agent Framework."])
]
response = await client.get_response(messages)
diff --git a/python/packages/a2a/agent_framework_a2a/_agent.py b/python/packages/a2a/agent_framework_a2a/_agent.py
index 00e045fba6..4dd89c6f02 100644
--- a/python/packages/a2a/agent_framework_a2a/_agent.py
+++ b/python/packages/a2a/agent_framework_a2a/_agent.py
@@ -32,7 +32,6 @@
BaseAgent,
ChatMessage,
Content,
- Role,
normalize_messages,
prepend_agent_framework_to_user_agent,
)
@@ -187,7 +186,7 @@ async def __aexit__(
async def run(
self,
- messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None,
+ messages: str | Content | ChatMessage | Sequence[str | Content | ChatMessage] | None = None,
*,
thread: AgentThread | None = None,
**kwargs: Any,
@@ -210,11 +209,11 @@ async def run(
"""
# Collect all updates and use framework to consolidate updates into response
updates = [update async for update in self.run_stream(messages, thread=thread, **kwargs)]
- return AgentResponse.from_agent_run_response_updates(updates)
+ return AgentResponse.from_updates(updates)
async def run_stream(
self,
- messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None,
+ messages: str | Content | ChatMessage | Sequence[str | Content | ChatMessage] | None = None,
*,
thread: AgentThread | None = None,
**kwargs: Any,
@@ -245,7 +244,7 @@ async def run_stream(
contents = self._parse_contents_from_a2a(item.parts)
yield AgentResponseUpdate(
contents=contents,
- role=Role.ASSISTANT if item.role == A2ARole.agent else Role.USER,
+ role="assistant" if item.role == A2ARole.agent else "user",
response_id=str(getattr(item, "message_id", uuid.uuid4())),
raw_representation=item,
)
@@ -269,7 +268,7 @@ async def run_stream(
# Empty task
yield AgentResponseUpdate(
contents=[],
- role=Role.ASSISTANT,
+ role="assistant",
response_id=task.id,
raw_representation=task,
)
@@ -421,7 +420,7 @@ def _parse_messages_from_task(self, task: Task) -> list[ChatMessage]:
contents = self._parse_contents_from_a2a(history_item.parts)
messages.append(
ChatMessage(
- role=Role.ASSISTANT if history_item.role == A2ARole.agent else Role.USER,
+ role="assistant" if history_item.role == A2ARole.agent else "user",
contents=contents,
raw_representation=history_item,
)
@@ -433,7 +432,7 @@ def _parse_message_from_artifact(self, artifact: Artifact) -> ChatMessage:
"""Parse A2A Artifact into ChatMessage using part contents."""
contents = self._parse_contents_from_a2a(artifact.parts)
return ChatMessage(
- role=Role.ASSISTANT,
+ role="assistant",
contents=contents,
raw_representation=artifact,
)
diff --git a/python/packages/a2a/tests/test_a2a_agent.py b/python/packages/a2a/tests/test_a2a_agent.py
index eca97b2ac6..cbbb16fd63 100644
--- a/python/packages/a2a/tests/test_a2a_agent.py
+++ b/python/packages/a2a/tests/test_a2a_agent.py
@@ -25,7 +25,6 @@
AgentResponseUpdate,
ChatMessage,
Content,
- Role,
)
from agent_framework.a2a import A2AAgent
from pytest import fixture, raises
@@ -129,7 +128,7 @@ async def test_run_with_message_response(a2a_agent: A2AAgent, mock_a2a_client: M
assert isinstance(response, AgentResponse)
assert len(response.messages) == 1
- assert response.messages[0].role == Role.ASSISTANT
+ assert response.messages[0].role == "assistant"
assert response.messages[0].text == "Hello from agent!"
assert response.response_id == "msg-123"
assert mock_a2a_client.call_count == 1
@@ -144,7 +143,7 @@ async def test_run_with_task_response_single_artifact(a2a_agent: A2AAgent, mock_
assert isinstance(response, AgentResponse)
assert len(response.messages) == 1
- assert response.messages[0].role == Role.ASSISTANT
+ assert response.messages[0].role == "assistant"
assert response.messages[0].text == "Generated report content"
assert response.response_id == "task-456"
assert mock_a2a_client.call_count == 1
@@ -170,7 +169,7 @@ async def test_run_with_task_response_multiple_artifacts(a2a_agent: A2AAgent, mo
# All should be assistant messages
for message in response.messages:
- assert message.role == Role.ASSISTANT
+ assert message.role == "assistant"
assert response.response_id == "task-789"
@@ -233,7 +232,7 @@ def test_parse_messages_from_task_with_artifacts(a2a_agent: A2AAgent) -> None:
assert len(result) == 2
assert result[0].text == "Content 1"
assert result[1].text == "Content 2"
- assert all(msg.role == Role.ASSISTANT for msg in result)
+ assert all(msg.role == "assistant" for msg in result)
def test_parse_message_from_artifact(a2a_agent: A2AAgent) -> None:
@@ -252,7 +251,7 @@ def test_parse_message_from_artifact(a2a_agent: A2AAgent) -> None:
result = a2a_agent._parse_message_from_artifact(artifact)
assert isinstance(result, ChatMessage)
- assert result.role == Role.ASSISTANT
+ assert result.role == "assistant"
assert result.text == "Artifact content"
assert result.raw_representation == artifact
@@ -296,7 +295,7 @@ def test_prepare_message_for_a2a_with_error_content(a2a_agent: A2AAgent) -> None
# Create ChatMessage with ErrorContent
error_content = Content.from_error(message="Test error message")
- message = ChatMessage(role=Role.USER, contents=[error_content])
+ message = ChatMessage("user", [error_content])
# Convert to A2A message
a2a_message = a2a_agent._prepare_message_for_a2a(message)
@@ -311,7 +310,7 @@ def test_prepare_message_for_a2a_with_uri_content(a2a_agent: A2AAgent) -> None:
# Create ChatMessage with UriContent
uri_content = Content.from_uri(uri="http://example.com/file.pdf", media_type="application/pdf")
- message = ChatMessage(role=Role.USER, contents=[uri_content])
+ message = ChatMessage("user", [uri_content])
# Convert to A2A message
a2a_message = a2a_agent._prepare_message_for_a2a(message)
@@ -327,7 +326,7 @@ def test_prepare_message_for_a2a_with_data_content(a2a_agent: A2AAgent) -> None:
# Create ChatMessage with DataContent (base64 data URI)
data_content = Content.from_uri(uri="data:text/plain;base64,SGVsbG8gV29ybGQ=", media_type="text/plain")
- message = ChatMessage(role=Role.USER, contents=[data_content])
+ message = ChatMessage("user", [data_content])
# Convert to A2A message
a2a_message = a2a_agent._prepare_message_for_a2a(message)
@@ -341,7 +340,7 @@ def test_prepare_message_for_a2a_with_data_content(a2a_agent: A2AAgent) -> None:
def test_prepare_message_for_a2a_empty_contents_raises_error(a2a_agent: A2AAgent) -> None:
"""Test _prepare_message_for_a2a with empty contents raises ValueError."""
# Create ChatMessage with no contents
- message = ChatMessage(role=Role.USER, contents=[])
+ message = ChatMessage("user", [])
# Should raise ValueError for empty contents
with raises(ValueError, match="ChatMessage.contents is empty"):
@@ -360,7 +359,7 @@ async def test_run_stream_with_message_response(a2a_agent: A2AAgent, mock_a2a_cl
# Verify streaming response
assert len(updates) == 1
assert isinstance(updates[0], AgentResponseUpdate)
- assert updates[0].role == Role.ASSISTANT
+ assert updates[0].role == "assistant"
assert len(updates[0].contents) == 1
content = updates[0].contents[0]
@@ -408,7 +407,7 @@ def test_prepare_message_for_a2a_with_multiple_contents() -> None:
# Create message with multiple content types
message = ChatMessage(
- role=Role.USER,
+ role="user",
contents=[
Content.from_text(text="Here's the analysis:"),
Content.from_data(data=b"binary data", media_type="application/octet-stream"),
@@ -465,7 +464,7 @@ def test_prepare_message_for_a2a_with_hosted_file() -> None:
# Create message with hosted file content
message = ChatMessage(
- role=Role.USER,
+ role="user",
contents=[Content.from_hosted_file(file_id="hosted://storage/document.pdf")],
)
diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_client.py b/python/packages/ag-ui/agent_framework_ag_ui/_client.py
index 74bb50e306..340d2c125f 100644
--- a/python/packages/ag-ui/agent_framework_ag_ui/_client.py
+++ b/python/packages/ag-ui/agent_framework_ag_ui/_client.py
@@ -334,7 +334,7 @@ async def _inner_get_response(
Returns:
ChatResponse object
"""
- return await ChatResponse.from_chat_response_generator(
+ return await ChatResponse.from_update_generator(
self._inner_get_streaming_response(
messages=messages,
options=options,
diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_event_converters.py b/python/packages/ag-ui/agent_framework_ag_ui/_event_converters.py
index bd2d989f2a..7b7e99e8d4 100644
--- a/python/packages/ag-ui/agent_framework_ag_ui/_event_converters.py
+++ b/python/packages/ag-ui/agent_framework_ag_ui/_event_converters.py
@@ -7,8 +7,6 @@
from agent_framework import (
ChatResponseUpdate,
Content,
- FinishReason,
- Role,
)
@@ -86,7 +84,7 @@ def _handle_run_started(self, event: dict[str, Any]) -> ChatResponseUpdate:
self.run_id = event.get("runId")
return ChatResponseUpdate(
- role=Role.ASSISTANT,
+ role="assistant",
contents=[],
additional_properties={
"thread_id": self.thread_id,
@@ -98,7 +96,7 @@ def _handle_text_message_start(self, event: dict[str, Any]) -> ChatResponseUpdat
"""Handle TEXT_MESSAGE_START event."""
self.current_message_id = event.get("messageId")
return ChatResponseUpdate(
- role=Role.ASSISTANT,
+ role="assistant",
message_id=self.current_message_id,
contents=[],
)
@@ -112,7 +110,7 @@ def _handle_text_message_content(self, event: dict[str, Any]) -> ChatResponseUpd
self.current_message_id = message_id
return ChatResponseUpdate(
- role=Role.ASSISTANT,
+ role="assistant",
message_id=self.current_message_id,
contents=[Content.from_text(text=delta)],
)
@@ -128,7 +126,7 @@ def _handle_tool_call_start(self, event: dict[str, Any]) -> ChatResponseUpdate:
self.accumulated_tool_args = ""
return ChatResponseUpdate(
- role=Role.ASSISTANT,
+ role="assistant",
contents=[
Content.from_function_call(
call_id=self.current_tool_call_id or "",
@@ -144,7 +142,7 @@ def _handle_tool_call_args(self, event: dict[str, Any]) -> ChatResponseUpdate:
self.accumulated_tool_args += delta
return ChatResponseUpdate(
- role=Role.ASSISTANT,
+ role="assistant",
contents=[
Content.from_function_call(
call_id=self.current_tool_call_id or "",
@@ -165,7 +163,7 @@ def _handle_tool_call_result(self, event: dict[str, Any]) -> ChatResponseUpdate:
result = event.get("result") if event.get("result") is not None else event.get("content")
return ChatResponseUpdate(
- role=Role.TOOL,
+ role="tool",
contents=[
Content.from_function_result(
call_id=tool_call_id,
@@ -177,8 +175,8 @@ def _handle_tool_call_result(self, event: dict[str, Any]) -> ChatResponseUpdate:
def _handle_run_finished(self, event: dict[str, Any]) -> ChatResponseUpdate:
"""Handle RUN_FINISHED event."""
return ChatResponseUpdate(
- role=Role.ASSISTANT,
- finish_reason=FinishReason.STOP,
+ role="assistant",
+ finish_reason="stop",
contents=[],
additional_properties={
"thread_id": self.thread_id,
@@ -191,8 +189,8 @@ def _handle_run_error(self, event: dict[str, Any]) -> ChatResponseUpdate:
error_message = event.get("message", "Unknown error")
return ChatResponseUpdate(
- role=Role.ASSISTANT,
- finish_reason=FinishReason.CONTENT_FILTER,
+ role="assistant",
+ finish_reason="content_filter",
contents=[
Content.from_error(
message=error_message,
diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_message_adapters.py b/python/packages/ag-ui/agent_framework_ag_ui/_message_adapters.py
index f8f1623a30..dfa64e9bdb 100644
--- a/python/packages/ag-ui/agent_framework_ag_ui/_message_adapters.py
+++ b/python/packages/ag-ui/agent_framework_ag_ui/_message_adapters.py
@@ -9,7 +9,6 @@
from agent_framework import (
ChatMessage,
Content,
- Role,
prepare_function_call_results,
)
@@ -269,7 +268,7 @@ def _update_tool_call_arguments(
def _find_matching_func_call(call_id: str) -> Content | None:
for prev_msg in result:
- role_val = prev_msg.role.value if hasattr(prev_msg.role, "value") else str(prev_msg.role)
+ role_val = str(prev_msg.role)
if role_val != "assistant":
continue
for content in prev_msg.contents or []:
@@ -287,7 +286,7 @@ def _resolve_approval_call_id(tool_call_id: str, parsed_payload: dict[str, Any]
return str(explicit_call_id)
for prev_msg in result:
- role_val = prev_msg.role.value if hasattr(prev_msg.role, "value") else str(prev_msg.role)
+ role_val = str(prev_msg.role)
if role_val != "assistant":
continue
direct_call = None
@@ -396,7 +395,7 @@ def _filter_modified_args(
m
for m in result
if not (
- (m.role.value if hasattr(m.role, "value") else str(m.role)) == "tool"
+ str(m.role) == "tool"
and any(
c.type == "function_result" and c.call_id == approval_call_id
for c in (m.contents or [])
@@ -473,14 +472,14 @@ def _filter_modified_args(
additional_properties={"ag_ui_state_args": state_args} if state_args else None,
)
chat_msg = ChatMessage(
- role=Role.USER,
+ role="user",
contents=[approval_response],
)
else:
# No matching function call found - this is likely a confirm_changes approval
# Keep the old behavior for backwards compatibility
chat_msg = ChatMessage(
- role=Role.USER,
+ role="user",
contents=[Content.from_text(text=approval_payload_text)],
additional_properties={"is_tool_result": True, "tool_call_id": str(tool_call_id or "")},
)
@@ -500,7 +499,7 @@ def _filter_modified_args(
else:
func_result = str(result_content)
chat_msg = ChatMessage(
- role=Role.TOOL,
+ role="tool",
contents=[Content.from_function_result(call_id=str(tool_call_id), result=func_result)],
)
if "id" in msg:
@@ -516,7 +515,7 @@ def _filter_modified_args(
result_content = msg.get("result", msg.get("content", ""))
chat_msg = ChatMessage(
- role=Role.TOOL,
+ role="tool",
contents=[Content.from_function_result(call_id=str(tool_call_id), result=result_content)],
)
if "id" in msg:
@@ -554,7 +553,7 @@ def _filter_modified_args(
arguments=arguments,
)
)
- chat_msg = ChatMessage(role=Role.ASSISTANT, contents=contents)
+ chat_msg = ChatMessage("assistant", contents)
if "id" in msg:
chat_msg.message_id = msg["id"]
result.append(chat_msg)
@@ -562,7 +561,7 @@ def _filter_modified_args(
# No special handling required for assistant/plain messages here
- role = AGUI_TO_FRAMEWORK_ROLE.get(role_str, Role.USER)
+ role = AGUI_TO_FRAMEWORK_ROLE.get(role_str, "user")
# Check if this message contains function approvals
if "function_approvals" in msg and msg["function_approvals"]:
@@ -584,14 +583,14 @@ def _filter_modified_args(
)
approval_contents.append(approval_response)
- chat_msg = ChatMessage(role=role, contents=approval_contents) # type: ignore[arg-type]
+ chat_msg = ChatMessage(role, approval_contents) # type: ignore[arg-type]
else:
# Regular text message
content = msg.get("content", "")
if isinstance(content, str):
- chat_msg = ChatMessage(role=role, contents=[Content.from_text(text=content)])
+ chat_msg = ChatMessage(role, [Content.from_text(text=content)])
else:
- chat_msg = ChatMessage(role=role, contents=[Content.from_text(text=str(content))])
+ chat_msg = ChatMessage(role, [Content.from_text(text=str(content))])
if "id" in msg:
chat_msg.message_id = msg["id"]
diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_run.py b/python/packages/ag-ui/agent_framework_ag_ui/_run.py
index d1229620a7..7cd9e0c686 100644
--- a/python/packages/ag-ui/agent_framework_ag_ui/_run.py
+++ b/python/packages/ag-ui/agent_framework_ag_ui/_run.py
@@ -862,7 +862,7 @@ async def run_agent_stream(
from pydantic import BaseModel
logger.info(f"Processing structured output, update count: {len(all_updates)}")
- final_response = AgentResponse.from_agent_run_response_updates(all_updates, output_format_type=response_format)
+ final_response = AgentResponse.from_updates(all_updates, output_format_type=response_format)
if final_response.value and isinstance(final_response.value, BaseModel):
response_dict = final_response.value.model_dump(mode="json", exclude_none=True)
diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_utils.py b/python/packages/ag-ui/agent_framework_ag_ui/_utils.py
index f7f01261f5..bb33c3279e 100644
--- a/python/packages/ag-ui/agent_framework_ag_ui/_utils.py
+++ b/python/packages/ag-ui/agent_framework_ag_ui/_utils.py
@@ -10,19 +10,19 @@
from datetime import date, datetime
from typing import Any
-from agent_framework import AgentResponseUpdate, ChatResponseUpdate, FunctionTool, Role, ToolProtocol
+from agent_framework import AgentResponseUpdate, ChatResponseUpdate, FunctionTool, ToolProtocol
# Role mapping constants
-AGUI_TO_FRAMEWORK_ROLE: dict[str, Role] = {
- "user": Role.USER,
- "assistant": Role.ASSISTANT,
- "system": Role.SYSTEM,
+AGUI_TO_FRAMEWORK_ROLE: dict[str, str] = {
+ "user": "user",
+ "assistant": "assistant",
+ "system": "system",
}
-FRAMEWORK_TO_AGUI_ROLE: dict[Role, str] = {
- Role.USER: "user",
- Role.ASSISTANT: "assistant",
- Role.SYSTEM: "system",
+FRAMEWORK_TO_AGUI_ROLE: dict[str, str] = {
+ "user": "user",
+ "assistant": "assistant",
+ "system": "system",
}
ALLOWED_AGUI_ROLES: set[str] = {"user", "assistant", "system", "tool"}
diff --git a/python/packages/ag-ui/getting_started/client_with_agent.py b/python/packages/ag-ui/getting_started/client_with_agent.py
index be23404583..1a17a8e618 100644
--- a/python/packages/ag-ui/getting_started/client_with_agent.py
+++ b/python/packages/ag-ui/getting_started/client_with_agent.py
@@ -171,7 +171,7 @@ def _preview_for_message(m) -> str:
messages = await thread.message_store.list_messages()
print(f"\n[THREAD STATE] {len(messages)} messages in thread's message_store")
for i, msg in enumerate(messages[-6:], 1): # Show last 6
- role = msg.role.value if hasattr(msg.role, "value") else str(msg.role)
+ role = str(msg.role)
text_preview = _preview_for_message(msg)
print(f" {i}. [{role}]: {text_preview}")
diff --git a/python/packages/ag-ui/tests/test_ag_ui_client.py b/python/packages/ag-ui/tests/test_ag_ui_client.py
index af9c7fb916..5f4ad1794b 100644
--- a/python/packages/ag-ui/tests/test_ag_ui_client.py
+++ b/python/packages/ag-ui/tests/test_ag_ui_client.py
@@ -12,7 +12,6 @@
ChatResponse,
ChatResponseUpdate,
Content,
- Role,
tool,
)
from pytest import MonkeyPatch
@@ -76,8 +75,8 @@ async def test_extract_state_from_messages_no_state(self) -> None:
"""Test state extraction when no state is present."""
client = TestableAGUIChatClient(endpoint="http://localhost:8888/")
messages = [
- ChatMessage(role="user", text="Hello"),
- ChatMessage(role="assistant", text="Hi there"),
+ ChatMessage("user", ["Hello"]),
+ ChatMessage("assistant", ["Hi there"]),
]
result_messages, state = client.extract_state_from_messages(messages)
@@ -96,7 +95,7 @@ async def test_extract_state_from_messages_with_state(self) -> None:
state_b64 = base64.b64encode(state_json.encode("utf-8")).decode("utf-8")
messages = [
- ChatMessage(role="user", text="Hello"),
+ ChatMessage("user", ["Hello"]),
ChatMessage(
role="user",
contents=[Content.from_uri(uri=f"data:application/json;base64,{state_b64}")],
@@ -134,8 +133,8 @@ async def test_convert_messages_to_agui_format(self) -> None:
"""Test message conversion to AG-UI format."""
client = TestableAGUIChatClient(endpoint="http://localhost:8888/")
messages = [
- ChatMessage(role=Role.USER, text="What is the weather?"),
- ChatMessage(role=Role.ASSISTANT, text="Let me check.", message_id="msg_123"),
+ ChatMessage("user", ["What is the weather?"]),
+ ChatMessage("assistant", ["Let me check."], message_id="msg_123"),
]
agui_messages = client.convert_messages_to_agui_format(messages)
@@ -182,7 +181,7 @@ async def mock_post_run(*args: object, **kwargs: Any) -> AsyncGenerator[dict[str
client = TestableAGUIChatClient(endpoint="http://localhost:8888/")
monkeypatch.setattr(client.http_service, "post_run", mock_post_run)
- messages = [ChatMessage(role="user", text="Test message")]
+ messages = [ChatMessage("user", ["Test message"])]
chat_options = ChatOptions()
updates: list[ChatResponseUpdate] = []
@@ -215,7 +214,7 @@ async def mock_post_run(*args: object, **kwargs: Any) -> AsyncGenerator[dict[str
client = TestableAGUIChatClient(endpoint="http://localhost:8888/")
monkeypatch.setattr(client.http_service, "post_run", mock_post_run)
- messages = [ChatMessage(role="user", text="Test message")]
+ messages = [ChatMessage("user", ["Test message"])]
chat_options = {}
response = await client.inner_get_response(messages=messages, options=chat_options)
@@ -258,7 +257,7 @@ async def mock_post_run(*args: object, **kwargs: Any) -> AsyncGenerator[dict[str
client = TestableAGUIChatClient(endpoint="http://localhost:8888/")
monkeypatch.setattr(client.http_service, "post_run", mock_post_run)
- messages = [ChatMessage(role="user", text="Test with tools")]
+ messages = [ChatMessage("user", ["Test with tools"])]
chat_options = ChatOptions(tools=[test_tool])
response = await client.inner_get_response(messages=messages, options=chat_options)
@@ -282,7 +281,7 @@ async def mock_post_run(*args: object, **kwargs: Any) -> AsyncGenerator[dict[str
client = TestableAGUIChatClient(endpoint="http://localhost:8888/")
monkeypatch.setattr(client.http_service, "post_run", mock_post_run)
- messages = [ChatMessage(role="user", text="Test server tool execution")]
+ messages = [ChatMessage("user", ["Test server tool execution"])]
updates: list[ChatResponseUpdate] = []
async for update in client.get_streaming_response(messages):
@@ -324,7 +323,7 @@ async def fake_auto_invoke(*args: object, **kwargs: Any) -> None:
client = TestableAGUIChatClient(endpoint="http://localhost:8888/")
monkeypatch.setattr(client.http_service, "post_run", mock_post_run)
- messages = [ChatMessage(role="user", text="Test server tool execution")]
+ messages = [ChatMessage("user", ["Test server tool execution"])]
async for _ in client.get_streaming_response(messages, options={"tool_choice": "auto", "tools": [client_tool]}):
pass
@@ -338,7 +337,7 @@ async def test_state_transmission(self, monkeypatch: MonkeyPatch) -> None:
state_b64 = base64.b64encode(state_json.encode("utf-8")).decode("utf-8")
messages = [
- ChatMessage(role="user", text="Hello"),
+ ChatMessage("user", ["Hello"]),
ChatMessage(
role="user",
contents=[Content.from_uri(uri=f"data:application/json;base64,{state_b64}")],
diff --git a/python/packages/ag-ui/tests/test_event_converters.py b/python/packages/ag-ui/tests/test_event_converters.py
index ff4d2ddc91..f26013a3fe 100644
--- a/python/packages/ag-ui/tests/test_event_converters.py
+++ b/python/packages/ag-ui/tests/test_event_converters.py
@@ -2,8 +2,6 @@
"""Tests for AG-UI event converter."""
-from agent_framework import FinishReason, Role
-
from agent_framework_ag_ui._event_converters import AGUIEventConverter
@@ -22,7 +20,7 @@ def test_run_started_event(self) -> None:
update = converter.convert_event(event)
assert update is not None
- assert update.role == Role.ASSISTANT
+ assert update.role == "assistant"
assert update.additional_properties["thread_id"] == "thread_123"
assert update.additional_properties["run_id"] == "run_456"
assert converter.thread_id == "thread_123"
@@ -39,7 +37,7 @@ def test_text_message_start_event(self) -> None:
update = converter.convert_event(event)
assert update is not None
- assert update.role == Role.ASSISTANT
+ assert update.role == "assistant"
assert update.message_id == "msg_789"
assert converter.current_message_id == "msg_789"
@@ -55,7 +53,7 @@ def test_text_message_content_event(self) -> None:
update = converter.convert_event(event)
assert update is not None
- assert update.role == Role.ASSISTANT
+ assert update.role == "assistant"
assert update.message_id == "msg_1"
assert len(update.contents) == 1
assert update.contents[0].text == "Hello"
@@ -101,7 +99,7 @@ def test_tool_call_start_event(self) -> None:
update = converter.convert_event(event)
assert update is not None
- assert update.role == Role.ASSISTANT
+ assert update.role == "assistant"
assert len(update.contents) == 1
assert update.contents[0].call_id == "call_123"
assert update.contents[0].name == "get_weather"
@@ -184,7 +182,7 @@ def test_tool_call_result_event(self) -> None:
update = converter.convert_event(event)
assert update is not None
- assert update.role == Role.TOOL
+ assert update.role == "tool"
assert len(update.contents) == 1
assert update.contents[0].call_id == "call_123"
assert update.contents[0].result == {"temperature": 22, "condition": "sunny"}
@@ -204,8 +202,8 @@ def test_run_finished_event(self) -> None:
update = converter.convert_event(event)
assert update is not None
- assert update.role == Role.ASSISTANT
- assert update.finish_reason == FinishReason.STOP
+ assert update.role == "assistant"
+ assert update.finish_reason == "stop"
assert update.additional_properties["thread_id"] == "thread_123"
assert update.additional_properties["run_id"] == "run_456"
@@ -223,8 +221,8 @@ def test_run_error_event(self) -> None:
update = converter.convert_event(event)
assert update is not None
- assert update.role == Role.ASSISTANT
- assert update.finish_reason == FinishReason.CONTENT_FILTER
+ assert update.role == "assistant"
+ assert update.finish_reason == "content_filter"
assert len(update.contents) == 1
assert update.contents[0].message == "Connection timeout"
assert update.contents[0].error_code == "RUN_ERROR"
diff --git a/python/packages/ag-ui/tests/test_helpers.py b/python/packages/ag-ui/tests/test_helpers.py
index b4a7e9f047..2fdd1d6771 100644
--- a/python/packages/ag-ui/tests/test_helpers.py
+++ b/python/packages/ag-ui/tests/test_helpers.py
@@ -29,8 +29,8 @@ def test_empty_messages(self):
def test_no_tool_calls(self):
"""Returns empty set when no tool calls in messages."""
messages = [
- ChatMessage(role="user", contents=[Content.from_text("Hello")]),
- ChatMessage(role="assistant", contents=[Content.from_text("Hi there")]),
+ ChatMessage("user", [Content.from_text("Hello")]),
+ ChatMessage("assistant", [Content.from_text("Hi there")]),
]
result = pending_tool_call_ids(messages)
assert result == set()
@@ -114,7 +114,7 @@ def test_system_message_without_state_prefix(self):
def test_empty_contents(self):
"""Returns False for message with empty contents."""
- message = ChatMessage(role="system", contents=[])
+ message = ChatMessage("system", [])
assert is_state_context_message(message) is False
@@ -342,7 +342,7 @@ def test_empty_messages(self):
def test_no_approval_response(self):
"""Returns None when no approval response in last message."""
messages = [
- ChatMessage(role="assistant", contents=[Content.from_text("Hello")]),
+ ChatMessage("assistant", [Content.from_text("Hello")]),
]
result = latest_approval_response(messages)
assert result is None
@@ -357,7 +357,7 @@ def test_finds_approval_response(self):
function_call=fc,
)
messages = [
- ChatMessage(role="user", contents=[approval_content]),
+ ChatMessage("user", [approval_content]),
]
result = latest_approval_response(messages)
assert result is approval_content
diff --git a/python/packages/ag-ui/tests/test_message_adapters.py b/python/packages/ag-ui/tests/test_message_adapters.py
index 4f6c3f1d42..85fe778e09 100644
--- a/python/packages/ag-ui/tests/test_message_adapters.py
+++ b/python/packages/ag-ui/tests/test_message_adapters.py
@@ -5,7 +5,7 @@
import json
import pytest
-from agent_framework import ChatMessage, Content, Role
+from agent_framework import ChatMessage, Content
from agent_framework_ag_ui._message_adapters import (
agent_framework_messages_to_agui,
@@ -24,7 +24,7 @@ def sample_agui_message():
@pytest.fixture
def sample_agent_framework_message():
"""Create a sample Agent Framework message."""
- return ChatMessage(role=Role.USER, contents=[Content.from_text(text="Hello")], message_id="msg-123")
+ return ChatMessage("user", [Content.from_text(text="Hello")], message_id="msg-123")
def test_agui_to_agent_framework_basic(sample_agui_message):
@@ -32,7 +32,7 @@ def test_agui_to_agent_framework_basic(sample_agui_message):
messages = agui_messages_to_agent_framework([sample_agui_message])
assert len(messages) == 1
- assert messages[0].role == Role.USER
+ assert messages[0].role == "user"
assert messages[0].message_id == "msg-123"
@@ -86,7 +86,7 @@ def test_agui_tool_result_to_agent_framework():
assert len(messages) == 1
message = messages[0]
- assert message.role == Role.USER
+ assert message.role == "user"
assert len(message.contents) == 1
assert message.contents[0].type == "text"
@@ -328,9 +328,9 @@ def test_agui_multiple_messages_to_agent_framework():
messages = agui_messages_to_agent_framework(messages_input)
assert len(messages) == 3
- assert messages[0].role == Role.USER
- assert messages[1].role == Role.ASSISTANT
- assert messages[2].role == Role.USER
+ assert messages[0].role == "user"
+ assert messages[1].role == "assistant"
+ assert messages[2].role == "user"
def test_agui_empty_messages():
@@ -366,7 +366,7 @@ def test_agui_function_approvals():
assert len(messages) == 1
msg = messages[0]
- assert msg.role == Role.USER
+ assert msg.role == "user"
assert len(msg.contents) == 2
assert msg.contents[0].type == "function_approval_response"
@@ -385,7 +385,7 @@ def test_agui_system_role():
messages = agui_messages_to_agent_framework([{"role": "system", "content": "System prompt"}])
assert len(messages) == 1
- assert messages[0].role == Role.SYSTEM
+ assert messages[0].role == "system"
def test_agui_non_string_content():
@@ -425,7 +425,7 @@ def test_agui_with_tool_calls_to_agent_framework():
assert len(messages) == 1
msg = messages[0]
- assert msg.role == Role.ASSISTANT
+ assert msg.role == "assistant"
assert msg.message_id == "msg-789"
# First content is text, second is the function call
assert msg.contents[0].type == "text"
@@ -439,7 +439,7 @@ def test_agui_with_tool_calls_to_agent_framework():
def test_agent_framework_to_agui_with_tool_calls():
"""Test converting Agent Framework message with tool calls to AG-UI."""
msg = ChatMessage(
- role=Role.ASSISTANT,
+ role="assistant",
contents=[
Content.from_text(text="Calling tool"),
Content.from_function_call(call_id="call-123", name="search", arguments={"query": "test"}),
@@ -464,7 +464,7 @@ def test_agent_framework_to_agui_with_tool_calls():
def test_agent_framework_to_agui_multiple_text_contents():
"""Test concatenating multiple text contents."""
msg = ChatMessage(
- role=Role.ASSISTANT,
+ role="assistant",
contents=[Content.from_text(text="Part 1 "), Content.from_text(text="Part 2")],
)
@@ -476,7 +476,7 @@ def test_agent_framework_to_agui_multiple_text_contents():
def test_agent_framework_to_agui_no_message_id():
"""Test message without message_id - should auto-generate ID."""
- msg = ChatMessage(role=Role.USER, contents=[Content.from_text(text="Hello")])
+ msg = ChatMessage("user", [Content.from_text(text="Hello")])
messages = agent_framework_messages_to_agui([msg])
@@ -488,7 +488,7 @@ def test_agent_framework_to_agui_no_message_id():
def test_agent_framework_to_agui_system_role():
"""Test system role conversion."""
- msg = ChatMessage(role=Role.SYSTEM, contents=[Content.from_text(text="System")])
+ msg = ChatMessage("system", [Content.from_text(text="System")])
messages = agent_framework_messages_to_agui([msg])
@@ -534,7 +534,7 @@ def test_extract_text_from_custom_contents():
def test_agent_framework_to_agui_function_result_dict():
"""Test converting FunctionResultContent with dict result to AG-UI."""
msg = ChatMessage(
- role=Role.TOOL,
+ role="tool",
contents=[Content.from_function_result(call_id="call-123", result={"key": "value", "count": 42})],
message_id="msg-789",
)
@@ -551,7 +551,7 @@ def test_agent_framework_to_agui_function_result_dict():
def test_agent_framework_to_agui_function_result_none():
"""Test converting FunctionResultContent with None result to AG-UI."""
msg = ChatMessage(
- role=Role.TOOL,
+ role="tool",
contents=[Content.from_function_result(call_id="call-123", result=None)],
message_id="msg-789",
)
@@ -567,7 +567,7 @@ def test_agent_framework_to_agui_function_result_none():
def test_agent_framework_to_agui_function_result_string():
"""Test converting FunctionResultContent with string result to AG-UI."""
msg = ChatMessage(
- role=Role.TOOL,
+ role="tool",
contents=[Content.from_function_result(call_id="call-123", result="plain text result")],
message_id="msg-789",
)
@@ -582,7 +582,7 @@ def test_agent_framework_to_agui_function_result_string():
def test_agent_framework_to_agui_function_result_empty_list():
"""Test converting FunctionResultContent with empty list result to AG-UI."""
msg = ChatMessage(
- role=Role.TOOL,
+ role="tool",
contents=[Content.from_function_result(call_id="call-123", result=[])],
message_id="msg-789",
)
@@ -604,7 +604,7 @@ class MockTextContent:
text: str
msg = ChatMessage(
- role=Role.TOOL,
+ role="tool",
contents=[Content.from_function_result(call_id="call-123", result=[MockTextContent("Hello from MCP!")])],
message_id="msg-789",
)
@@ -626,7 +626,7 @@ class MockTextContent:
text: str
msg = ChatMessage(
- role=Role.TOOL,
+ role="tool",
contents=[
Content.from_function_result(
call_id="call-123",
@@ -723,7 +723,7 @@ def test_agui_to_agent_framework_tool_result():
assert len(result) == 2
# Second message should be tool result
tool_msg = result[1]
- assert tool_msg.role == Role.TOOL
+ assert tool_msg.role == "tool"
assert tool_msg.contents[0].type == "function_result"
assert tool_msg.contents[0].result == "Sunny"
diff --git a/python/packages/ag-ui/tests/test_message_hygiene.py b/python/packages/ag-ui/tests/test_message_hygiene.py
index ecc01de3cb..03c8a1b9b3 100644
--- a/python/packages/ag-ui/tests/test_message_hygiene.py
+++ b/python/packages/ag-ui/tests/test_message_hygiene.py
@@ -25,9 +25,7 @@ def test_sanitize_tool_history_injects_confirm_changes_result() -> None:
sanitized = _sanitize_tool_history(messages)
- tool_messages = [
- msg for msg in sanitized if (msg.role.value if hasattr(msg.role, "value") else str(msg.role)) == "tool"
- ]
+ tool_messages = [msg for msg in sanitized if (msg.role if hasattr(msg.role, "value") else str(msg.role)) == "tool"]
assert len(tool_messages) == 1
assert str(tool_messages[0].contents[0].call_id) == "call_confirm_123"
assert tool_messages[0].contents[0].result == "Confirmed"
diff --git a/python/packages/ag-ui/tests/test_run.py b/python/packages/ag-ui/tests/test_run.py
index a415000692..7fb7055ae0 100644
--- a/python/packages/ag-ui/tests/test_run.py
+++ b/python/packages/ag-ui/tests/test_run.py
@@ -188,7 +188,6 @@ def test_no_schema(self):
def test_creates_message(self):
"""Creates state context message."""
- from agent_framework import Role
state = {"document": "Hello world"}
schema = {"properties": {"document": {"type": "string"}}}
@@ -196,7 +195,7 @@ def test_creates_message(self):
result = _create_state_context_message(state, schema)
assert result is not None
- assert result.role == Role.SYSTEM
+ assert result.role == "system"
assert len(result.contents) == 1
assert "Hello world" in result.contents[0].text
assert "Current state" in result.contents[0].text
@@ -207,7 +206,7 @@ class TestInjectStateContext:
def test_no_state_message(self):
"""Returns original messages when no state context needed."""
- messages = [ChatMessage(role="user", contents=[Content.from_text("Hello")])]
+ messages = [ChatMessage("user", [Content.from_text("Hello")])]
result = _inject_state_context(messages, {}, {})
assert result == messages
@@ -219,8 +218,8 @@ def test_empty_messages(self):
def test_last_message_not_user(self):
"""Returns original messages when last message is not from user."""
messages = [
- ChatMessage(role="user", contents=[Content.from_text("Hello")]),
- ChatMessage(role="assistant", contents=[Content.from_text("Hi")]),
+ ChatMessage("user", [Content.from_text("Hello")]),
+ ChatMessage("assistant", [Content.from_text("Hi")]),
]
state = {"key": "value"}
schema = {"properties": {"key": {"type": "string"}}}
@@ -230,11 +229,10 @@ def test_last_message_not_user(self):
def test_injects_before_last_user_message(self):
"""Injects state context before last user message."""
- from agent_framework import Role
messages = [
- ChatMessage(role="system", contents=[Content.from_text("You are helpful")]),
- ChatMessage(role="user", contents=[Content.from_text("Hello")]),
+ ChatMessage("system", [Content.from_text("You are helpful")]),
+ ChatMessage("user", [Content.from_text("Hello")]),
]
state = {"document": "content"}
schema = {"properties": {"document": {"type": "string"}}}
@@ -243,13 +241,13 @@ def test_injects_before_last_user_message(self):
assert len(result) == 3
# System message first
- assert result[0].role == Role.SYSTEM
+ assert result[0].role == "system"
assert "helpful" in result[0].contents[0].text
# State context second
- assert result[1].role == Role.SYSTEM
+ assert result[1].role == "system"
assert "Current state" in result[1].contents[0].text
# User message last
- assert result[2].role == Role.USER
+ assert result[2].role == "user"
assert "Hello" in result[2].contents[0].text
@@ -357,7 +355,7 @@ def test_extract_approved_state_updates_no_handler():
"""Test _extract_approved_state_updates returns empty with no handler."""
from agent_framework_ag_ui._run import _extract_approved_state_updates
- messages = [ChatMessage(role="user", contents=[Content.from_text("Hello")])]
+ messages = [ChatMessage("user", [Content.from_text("Hello")])]
result = _extract_approved_state_updates(messages, None)
assert result == {}
@@ -368,6 +366,6 @@ def test_extract_approved_state_updates_no_approval():
from agent_framework_ag_ui._run import _extract_approved_state_updates
handler = PredictiveStateHandler(predict_state_config={"doc": {"tool": "write", "tool_argument": "content"}})
- messages = [ChatMessage(role="user", contents=[Content.from_text("Hello")])]
+ messages = [ChatMessage("user", [Content.from_text("Hello")])]
result = _extract_approved_state_updates(messages, handler)
assert result == {}
diff --git a/python/packages/ag-ui/tests/test_utils.py b/python/packages/ag-ui/tests/test_utils.py
index 7f1de812c4..41b8e3665b 100644
--- a/python/packages/ag-ui/tests/test_utils.py
+++ b/python/packages/ag-ui/tests/test_utils.py
@@ -404,11 +404,11 @@ def test_safe_json_parse_with_none():
def test_get_role_value_with_enum():
"""Test get_role_value with enum role."""
- from agent_framework import ChatMessage, Content, Role
+ from agent_framework import ChatMessage, Content
from agent_framework_ag_ui._utils import get_role_value
- message = ChatMessage(role=Role.USER, contents=[Content.from_text("test")])
+ message = ChatMessage("user", [Content.from_text("test")])
result = get_role_value(message)
assert result == "user"
diff --git a/python/packages/ag-ui/tests/utils_test_ag_ui.py b/python/packages/ag-ui/tests/utils_test_ag_ui.py
index 5c2415583c..9ac9b04df4 100644
--- a/python/packages/ag-ui/tests/utils_test_ag_ui.py
+++ b/python/packages/ag-ui/tests/utils_test_ag_ui.py
@@ -56,7 +56,7 @@ async def _inner_get_response(
contents.extend(update.contents)
return ChatResponse(
- messages=[ChatMessage(role="assistant", contents=contents)],
+ messages=[ChatMessage("assistant", contents)],
response_id="stub-response",
)
diff --git a/python/packages/anthropic/agent_framework_anthropic/_chat_client.py b/python/packages/anthropic/agent_framework_anthropic/_chat_client.py
index 630b92ca02..901a42122f 100644
--- a/python/packages/anthropic/agent_framework_anthropic/_chat_client.py
+++ b/python/packages/anthropic/agent_framework_anthropic/_chat_client.py
@@ -13,12 +13,10 @@
ChatResponse,
ChatResponseUpdate,
Content,
- FinishReason,
FunctionTool,
HostedCodeInterpreterTool,
HostedMCPTool,
HostedWebSearchTool,
- Role,
TextSpanRegion,
UsageDetails,
get_logger,
@@ -172,20 +170,20 @@ class AnthropicChatOptions(ChatOptions[TResponseModel], Generic[TResponseModel],
# region Role and Finish Reason Maps
-ROLE_MAP: dict[Role, str] = {
- Role.USER: "user",
- Role.ASSISTANT: "assistant",
- Role.SYSTEM: "user",
- Role.TOOL: "user",
+ROLE_MAP: dict[str, str] = {
+ "user": "user",
+ "assistant": "assistant",
+ "system": "user",
+ "tool": "user",
}
-FINISH_REASON_MAP: dict[str, FinishReason] = {
- "stop_sequence": FinishReason.STOP,
- "max_tokens": FinishReason.LENGTH,
- "tool_use": FinishReason.TOOL_CALLS,
- "end_turn": FinishReason.STOP,
- "refusal": FinishReason.CONTENT_FILTER,
- "pause_turn": FinishReason.STOP,
+FINISH_REASON_MAP: dict[str, str] = {
+ "stop_sequence": "stop",
+ "max_tokens": "length",
+ "tool_use": "tool_calls",
+ "end_turn": "stop",
+ "refusal": "content_filter",
+ "pause_turn": "stop",
}
@@ -415,7 +413,7 @@ def _prepare_options(
run_options["messages"] = self._prepare_messages_for_anthropic(messages)
# system message - first system message is passed as instructions
- if messages and isinstance(messages[0], ChatMessage) and messages[0].role == Role.SYSTEM:
+ if messages and isinstance(messages[0], ChatMessage) and messages[0].role == "system":
run_options["system"] = messages[0].text
# betas
@@ -502,7 +500,7 @@ def _prepare_messages_for_anthropic(self, messages: MutableSequence[ChatMessage]
as Anthropic expects system instructions as a separate parameter.
"""
# first system message is passed as instructions
- if messages and isinstance(messages[0], ChatMessage) and messages[0].role == Role.SYSTEM:
+ if messages and isinstance(messages[0], ChatMessage) and messages[0].role == "system":
return [self._prepare_message_for_anthropic(msg) for msg in messages[1:]]
return [self._prepare_message_for_anthropic(msg) for msg in messages]
@@ -673,7 +671,7 @@ def _process_message(self, message: BetaMessage, options: dict[str, Any]) -> Cha
response_id=message.id,
messages=[
ChatMessage(
- role=Role.ASSISTANT,
+ role="assistant",
contents=self._parse_contents_from_anthropic(message.content),
raw_representation=message,
)
diff --git a/python/packages/anthropic/tests/test_anthropic_client.py b/python/packages/anthropic/tests/test_anthropic_client.py
index 6b06843b73..516f644ea7 100644
--- a/python/packages/anthropic/tests/test_anthropic_client.py
+++ b/python/packages/anthropic/tests/test_anthropic_client.py
@@ -11,11 +11,9 @@
ChatOptions,
ChatResponseUpdate,
Content,
- FinishReason,
HostedCodeInterpreterTool,
HostedMCPTool,
HostedWebSearchTool,
- Role,
tool,
)
from agent_framework.exceptions import ServiceInitializationError
@@ -150,7 +148,7 @@ def test_anthropic_client_service_url(mock_anthropic_client: MagicMock) -> None:
def test_prepare_message_for_anthropic_text(mock_anthropic_client: MagicMock) -> None:
"""Test converting text message to Anthropic format."""
chat_client = create_test_anthropic_client(mock_anthropic_client)
- message = ChatMessage(role=Role.USER, text="Hello, world!")
+ message = ChatMessage("user", ["Hello, world!"])
result = chat_client._prepare_message_for_anthropic(message)
@@ -164,7 +162,7 @@ def test_prepare_message_for_anthropic_function_call(mock_anthropic_client: Magi
"""Test converting function call message to Anthropic format."""
chat_client = create_test_anthropic_client(mock_anthropic_client)
message = ChatMessage(
- role=Role.ASSISTANT,
+ role="assistant",
contents=[
Content.from_function_call(
call_id="call_123",
@@ -188,7 +186,7 @@ def test_prepare_message_for_anthropic_function_result(mock_anthropic_client: Ma
"""Test converting function result message to Anthropic format."""
chat_client = create_test_anthropic_client(mock_anthropic_client)
message = ChatMessage(
- role=Role.TOOL,
+ role="tool",
contents=[
Content.from_function_result(
call_id="call_123",
@@ -213,7 +211,7 @@ def test_prepare_message_for_anthropic_text_reasoning(mock_anthropic_client: Mag
"""Test converting text reasoning message to Anthropic format."""
chat_client = create_test_anthropic_client(mock_anthropic_client)
message = ChatMessage(
- role=Role.ASSISTANT,
+ role="assistant",
contents=[Content.from_text_reasoning(text="Let me think about this...")],
)
@@ -229,8 +227,8 @@ def test_prepare_messages_for_anthropic_with_system(mock_anthropic_client: Magic
"""Test converting messages list with system message."""
chat_client = create_test_anthropic_client(mock_anthropic_client)
messages = [
- ChatMessage(role=Role.SYSTEM, text="You are a helpful assistant."),
- ChatMessage(role=Role.USER, text="Hello!"),
+ ChatMessage("system", ["You are a helpful assistant."]),
+ ChatMessage("user", ["Hello!"]),
]
result = chat_client._prepare_messages_for_anthropic(messages)
@@ -245,8 +243,8 @@ def test_prepare_messages_for_anthropic_without_system(mock_anthropic_client: Ma
"""Test converting messages list without system message."""
chat_client = create_test_anthropic_client(mock_anthropic_client)
messages = [
- ChatMessage(role=Role.USER, text="Hello!"),
- ChatMessage(role=Role.ASSISTANT, text="Hi there!"),
+ ChatMessage("user", ["Hello!"]),
+ ChatMessage("assistant", ["Hi there!"]),
]
result = chat_client._prepare_messages_for_anthropic(messages)
@@ -374,7 +372,7 @@ async def test_prepare_options_basic(mock_anthropic_client: MagicMock) -> None:
"""Test _prepare_options with basic ChatOptions."""
chat_client = create_test_anthropic_client(mock_anthropic_client)
- messages = [ChatMessage(role=Role.USER, text="Hello")]
+ messages = [ChatMessage("user", ["Hello"])]
chat_options = ChatOptions(max_tokens=100, temperature=0.7)
run_options = chat_client._prepare_options(messages, chat_options)
@@ -390,8 +388,8 @@ async def test_prepare_options_with_system_message(mock_anthropic_client: MagicM
chat_client = create_test_anthropic_client(mock_anthropic_client)
messages = [
- ChatMessage(role=Role.SYSTEM, text="You are helpful."),
- ChatMessage(role=Role.USER, text="Hello"),
+ ChatMessage("system", ["You are helpful."]),
+ ChatMessage("user", ["Hello"]),
]
chat_options = ChatOptions()
@@ -405,7 +403,7 @@ async def test_prepare_options_with_tool_choice_auto(mock_anthropic_client: Magi
"""Test _prepare_options with auto tool choice."""
chat_client = create_test_anthropic_client(mock_anthropic_client)
- messages = [ChatMessage(role=Role.USER, text="Hello")]
+ messages = [ChatMessage("user", ["Hello"])]
chat_options = ChatOptions(tool_choice="auto")
run_options = chat_client._prepare_options(messages, chat_options)
@@ -417,7 +415,7 @@ async def test_prepare_options_with_tool_choice_required(mock_anthropic_client:
"""Test _prepare_options with required tool choice."""
chat_client = create_test_anthropic_client(mock_anthropic_client)
- messages = [ChatMessage(role=Role.USER, text="Hello")]
+ messages = [ChatMessage("user", ["Hello"])]
# For required with specific function, need to pass as dict
chat_options = ChatOptions(tool_choice={"mode": "required", "required_function_name": "get_weather"})
@@ -431,7 +429,7 @@ async def test_prepare_options_with_tool_choice_none(mock_anthropic_client: Magi
"""Test _prepare_options with none tool choice."""
chat_client = create_test_anthropic_client(mock_anthropic_client)
- messages = [ChatMessage(role=Role.USER, text="Hello")]
+ messages = [ChatMessage("user", ["Hello"])]
chat_options = ChatOptions(tool_choice="none")
run_options = chat_client._prepare_options(messages, chat_options)
@@ -448,7 +446,7 @@ def get_weather(location: str) -> str:
"""Get weather for a location."""
return f"Weather for {location}"
- messages = [ChatMessage(role=Role.USER, text="Hello")]
+ messages = [ChatMessage("user", ["Hello"])]
chat_options = ChatOptions(tools=[get_weather])
run_options = chat_client._prepare_options(messages, chat_options)
@@ -461,7 +459,7 @@ async def test_prepare_options_with_stop_sequences(mock_anthropic_client: MagicM
"""Test _prepare_options with stop sequences."""
chat_client = create_test_anthropic_client(mock_anthropic_client)
- messages = [ChatMessage(role=Role.USER, text="Hello")]
+ messages = [ChatMessage("user", ["Hello"])]
chat_options = ChatOptions(stop=["STOP", "END"])
run_options = chat_client._prepare_options(messages, chat_options)
@@ -473,7 +471,7 @@ async def test_prepare_options_with_top_p(mock_anthropic_client: MagicMock) -> N
"""Test _prepare_options with top_p."""
chat_client = create_test_anthropic_client(mock_anthropic_client)
- messages = [ChatMessage(role=Role.USER, text="Hello")]
+ messages = [ChatMessage("user", ["Hello"])]
chat_options = ChatOptions(top_p=0.9)
run_options = chat_client._prepare_options(messages, chat_options)
@@ -500,11 +498,11 @@ def test_process_message_basic(mock_anthropic_client: MagicMock) -> None:
assert response.response_id == "msg_123"
assert response.model_id == "claude-3-5-sonnet-20241022"
assert len(response.messages) == 1
- assert response.messages[0].role == Role.ASSISTANT
+ assert response.messages[0].role == "assistant"
assert len(response.messages[0].contents) == 1
assert response.messages[0].contents[0].type == "text"
assert response.messages[0].contents[0].text == "Hello there!"
- assert response.finish_reason == FinishReason.STOP
+ assert response.finish_reason == "stop"
assert response.usage_details is not None
assert response.usage_details["input_token_count"] == 10
assert response.usage_details["output_token_count"] == 5
@@ -534,7 +532,7 @@ def test_process_message_with_tool_use(mock_anthropic_client: MagicMock) -> None
assert response.messages[0].contents[0].type == "function_call"
assert response.messages[0].contents[0].call_id == "call_123"
assert response.messages[0].contents[0].name == "get_weather"
- assert response.finish_reason == FinishReason.TOOL_CALLS
+ assert response.finish_reason == "tool_calls"
def test_parse_usage_from_anthropic_basic(mock_anthropic_client: MagicMock) -> None:
@@ -668,7 +666,7 @@ async def test_inner_get_response(mock_anthropic_client: MagicMock) -> None:
mock_anthropic_client.beta.messages.create.return_value = mock_message
- messages = [ChatMessage(role=Role.USER, text="Hi")]
+ messages = [ChatMessage("user", ["Hi"])]
chat_options = ChatOptions(max_tokens=10)
response = await chat_client._inner_get_response( # type: ignore[attr-defined]
@@ -692,7 +690,7 @@ async def mock_stream():
mock_anthropic_client.beta.messages.create.return_value = mock_stream()
- messages = [ChatMessage(role=Role.USER, text="Hi")]
+ messages = [ChatMessage("user", ["Hi"])]
chat_options = ChatOptions(max_tokens=10)
chunks: list[ChatResponseUpdate] = []
@@ -723,13 +721,13 @@ async def test_anthropic_client_integration_basic_chat() -> None:
"""Integration test for basic chat completion."""
client = AnthropicClient()
- messages = [ChatMessage(role=Role.USER, text="Say 'Hello, World!' and nothing else.")]
+ messages = [ChatMessage("user", ["Say 'Hello, World!' and nothing else."])]
response = await client.get_response(messages=messages, options={"max_tokens": 50})
assert response is not None
assert len(response.messages) > 0
- assert response.messages[0].role == Role.ASSISTANT
+ assert response.messages[0].role == "assistant"
assert len(response.messages[0].text) > 0
assert response.usage_details is not None
@@ -740,7 +738,7 @@ async def test_anthropic_client_integration_streaming_chat() -> None:
"""Integration test for streaming chat completion."""
client = AnthropicClient()
- messages = [ChatMessage(role=Role.USER, text="Count from 1 to 5.")]
+ messages = [ChatMessage("user", ["Count from 1 to 5."])]
chunks = []
async for chunk in client.get_streaming_response(messages=messages, options={"max_tokens": 50}):
@@ -756,7 +754,7 @@ async def test_anthropic_client_integration_function_calling() -> None:
"""Integration test for function calling."""
client = AnthropicClient()
- messages = [ChatMessage(role=Role.USER, text="What's the weather in San Francisco?")]
+ messages = [ChatMessage("user", ["What's the weather in San Francisco?"])]
tools = [get_weather]
response = await client.get_response(
@@ -776,7 +774,7 @@ async def test_anthropic_client_integration_hosted_tools() -> None:
"""Integration test for hosted tools."""
client = AnthropicClient()
- messages = [ChatMessage(role=Role.USER, text="What tools do you have available?")]
+ messages = [ChatMessage("user", ["What tools do you have available?"])]
tools = [
HostedWebSearchTool(),
HostedCodeInterpreterTool(),
@@ -803,8 +801,8 @@ async def test_anthropic_client_integration_with_system_message() -> None:
client = AnthropicClient()
messages = [
- ChatMessage(role=Role.SYSTEM, text="You are a pirate. Always respond like a pirate."),
- ChatMessage(role=Role.USER, text="Hello!"),
+ ChatMessage("system", ["You are a pirate. Always respond like a pirate."]),
+ ChatMessage("user", ["Hello!"]),
]
response = await client.get_response(messages=messages, options={"max_tokens": 50})
@@ -819,7 +817,7 @@ async def test_anthropic_client_integration_temperature_control() -> None:
"""Integration test with temperature control."""
client = AnthropicClient()
- messages = [ChatMessage(role=Role.USER, text="Say hello.")]
+ messages = [ChatMessage("user", ["Say hello."])]
response = await client.get_response(
messages=messages,
@@ -837,11 +835,11 @@ async def test_anthropic_client_integration_ordering() -> None:
client = AnthropicClient()
messages = [
- ChatMessage(role=Role.USER, text="Say hello."),
- ChatMessage(role=Role.USER, text="Then say goodbye."),
- ChatMessage(role=Role.ASSISTANT, text="Thank you for chatting!"),
- ChatMessage(role=Role.ASSISTANT, text="Let me know if I can help."),
- ChatMessage(role=Role.USER, text="Just testing things."),
+ ChatMessage("user", ["Say hello."]),
+ ChatMessage("user", ["Then say goodbye."]),
+ ChatMessage("assistant", ["Thank you for chatting!"]),
+ ChatMessage("assistant", ["Let me know if I can help."]),
+ ChatMessage("user", ["Just testing things."]),
]
response = await client.get_response(messages=messages)
@@ -863,7 +861,7 @@ async def test_anthropic_client_integration_images() -> None:
messages = [
ChatMessage(
- role=Role.USER,
+ role="user",
contents=[
Content.from_text(text="Describe this image"),
Content.from_data(media_type="image/jpeg", data=image_bytes),
diff --git a/python/packages/azure-ai-search/agent_framework_azure_ai_search/_search_provider.py b/python/packages/azure-ai-search/agent_framework_azure_ai_search/_search_provider.py
index ac81a3c50b..e11d3e8793 100644
--- a/python/packages/azure-ai-search/agent_framework_azure_ai_search/_search_provider.py
+++ b/python/packages/azure-ai-search/agent_framework_azure_ai_search/_search_provider.py
@@ -5,7 +5,7 @@
from collections.abc import Awaitable, Callable, MutableSequence
from typing import TYPE_CHECKING, Any, ClassVar, Literal
-from agent_framework import AGENT_FRAMEWORK_USER_AGENT, ChatMessage, Context, ContextProvider, Role
+from agent_framework import AGENT_FRAMEWORK_USER_AGENT, ChatMessage, Context, ContextProvider
from agent_framework._logging import get_logger
from agent_framework._pydantic import AFBaseSettings
from agent_framework.exceptions import ServiceInitializationError
@@ -525,9 +525,7 @@ async def invoking(
messages_list = [messages] if isinstance(messages, ChatMessage) else list(messages)
filtered_messages = [
- msg
- for msg in messages_list
- if msg and msg.text and msg.text.strip() and msg.role in [Role.USER, Role.ASSISTANT]
+ msg for msg in messages_list if msg and msg.text and msg.text.strip() and msg.role in ["user", "assistant"]
]
if not filtered_messages:
@@ -548,8 +546,8 @@ async def invoking(
return Context()
# Create context messages: first message with prompt, then one message per result part
- context_messages = [ChatMessage(role=Role.USER, text=self.context_prompt)]
- context_messages.extend([ChatMessage(role=Role.USER, text=part) for part in search_result_parts])
+ context_messages = [ChatMessage("user", [self.context_prompt])]
+ context_messages.extend([ChatMessage("user", [part]) for part in search_result_parts])
return Context(messages=context_messages)
@@ -921,7 +919,7 @@ async def _agentic_search(self, messages: list[ChatMessage]) -> list[str]:
# Medium/low reasoning uses messages with conversation history
kb_messages = [
KnowledgeBaseMessage(
- role=msg.role.value if hasattr(msg.role, "value") else str(msg.role),
+ role=msg.role if hasattr(msg.role, "value") else str(msg.role),
content=[KnowledgeBaseMessageTextContent(text=msg.text)],
)
for msg in messages
diff --git a/python/packages/azure-ai-search/tests/test_search_provider.py b/python/packages/azure-ai-search/tests/test_search_provider.py
index 66ead79a6b..d348f3ef79 100644
--- a/python/packages/azure-ai-search/tests/test_search_provider.py
+++ b/python/packages/azure-ai-search/tests/test_search_provider.py
@@ -5,7 +5,7 @@
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
-from agent_framework import ChatMessage, Context, Role
+from agent_framework import ChatMessage, Context
from agent_framework.azure import AzureAISearchContextProvider, AzureAISearchSettings
from agent_framework.exceptions import ServiceInitializationError
from azure.core.credentials import AzureKeyCredential
@@ -39,7 +39,7 @@ def mock_index_client() -> AsyncMock:
def sample_messages() -> list[ChatMessage]:
"""Create sample chat messages for testing."""
return [
- ChatMessage(role=Role.USER, text="What is in the documents?"),
+ ChatMessage("user", ["What is in the documents?"]),
]
@@ -318,7 +318,7 @@ async def test_semantic_search_empty_query(self, mock_search_class: MagicMock) -
)
# Empty message
- context = await provider.invoking([ChatMessage(role=Role.USER, text="")])
+ context = await provider.invoking([ChatMessage("user", [""])])
assert isinstance(context, Context)
assert len(context.messages) == 0
@@ -520,10 +520,10 @@ async def test_filters_non_user_assistant_messages(self, mock_search_class: Magi
# Mix of message types
messages = [
- ChatMessage(role=Role.SYSTEM, text="System message"),
- ChatMessage(role=Role.USER, text="User message"),
- ChatMessage(role=Role.ASSISTANT, text="Assistant message"),
- ChatMessage(role=Role.TOOL, text="Tool message"),
+ ChatMessage("system", ["System message"]),
+ ChatMessage("user", ["User message"]),
+ ChatMessage("assistant", ["Assistant message"]),
+ ChatMessage("tool", ["Tool message"]),
]
context = await provider.invoking(messages)
@@ -548,9 +548,9 @@ async def test_filters_empty_messages(self, mock_search_class: MagicMock) -> Non
# Messages with empty/whitespace text
messages = [
- ChatMessage(role=Role.USER, text=""),
- ChatMessage(role=Role.USER, text=" "),
- ChatMessage(role=Role.USER, text=None),
+ ChatMessage("user", [""]),
+ ChatMessage("user", [" "]),
+ ChatMessage("user", [None]),
]
context = await provider.invoking(messages)
@@ -581,7 +581,7 @@ async def test_citations_included_in_semantic_search(self, mock_search_class: Ma
mode="semantic",
)
- context = await provider.invoking([ChatMessage(role=Role.USER, text="test query")])
+ context = await provider.invoking([ChatMessage("user", ["test query"])])
# Check that citation is included
assert isinstance(context, Context)
diff --git a/python/packages/azure-ai/agent_framework_azure_ai/_chat_client.py b/python/packages/azure-ai/agent_framework_azure_ai/_chat_client.py
index 540aacbca2..e2c1c79bdb 100644
--- a/python/packages/azure-ai/agent_framework_azure_ai/_chat_client.py
+++ b/python/packages/azure-ai/agent_framework_azure_ai/_chat_client.py
@@ -26,7 +26,6 @@
HostedMCPTool,
HostedWebSearchTool,
Middleware,
- Role,
TextSpanRegion,
ToolProtocol,
UsageDetails,
@@ -353,7 +352,7 @@ async def _inner_get_response(
options: dict[str, Any],
**kwargs: Any,
) -> ChatResponse:
- return await ChatResponse.from_chat_response_generator(
+ return await ChatResponse.from_update_generator(
updates=self._inner_get_streaming_response(messages=messages, options=options, **kwargs),
output_format_type=options.get("response_format"),
)
@@ -638,7 +637,7 @@ async def _process_stream(
match event_data:
case MessageDeltaChunk():
# only one event_type: AgentStreamEvent.THREAD_MESSAGE_DELTA
- role = Role.USER if event_data.delta.role == MessageRole.USER else Role.ASSISTANT
+ role = "user" if event_data.delta.role == "user" else "assistant"
# Extract URL citations from the delta chunk
url_citations = self._extract_url_citations(event_data, azure_search_tool_calls)
@@ -688,7 +687,7 @@ async def _process_stream(
)
if function_call_contents:
yield ChatResponseUpdate(
- role=Role.ASSISTANT,
+ role="assistant",
contents=function_call_contents,
conversation_id=thread_id,
message_id=response_id,
@@ -704,7 +703,7 @@ async def _process_stream(
message_id=response_id,
raw_representation=event_data,
response_id=response_id,
- role=Role.ASSISTANT,
+ role="assistant",
model_id=event_data.model,
)
@@ -733,7 +732,7 @@ async def _process_stream(
)
)
yield ChatResponseUpdate(
- role=Role.ASSISTANT,
+ role="assistant",
contents=[usage_content],
conversation_id=thread_id,
message_id=response_id,
@@ -747,7 +746,7 @@ async def _process_stream(
message_id=response_id,
raw_representation=event_data,
response_id=response_id,
- role=Role.ASSISTANT,
+ role="assistant",
)
case RunStepDeltaChunk(): # type: ignore
if (
@@ -776,7 +775,7 @@ async def _process_stream(
Content.from_hosted_file(file_id=output.image.file_id)
)
yield ChatResponseUpdate(
- role=Role.ASSISTANT,
+ role="assistant",
contents=code_contents,
conversation_id=thread_id,
message_id=response_id,
@@ -795,7 +794,7 @@ async def _process_stream(
message_id=response_id,
raw_representation=event_data, # type: ignore
response_id=response_id,
- role=Role.ASSISTANT,
+ role="assistant",
)
except Exception as ex:
logger.error(f"Error processing stream: {ex}")
@@ -1077,7 +1076,7 @@ def _prepare_messages(
additional_messages: list[ThreadMessageOptions] | None = None
for chat_message in messages:
- if chat_message.role.value in ["system", "developer"]:
+ if chat_message.role in ["system", "developer"]:
for text_content in [content for content in chat_message.contents if content.type == "text"]:
instructions.append(text_content.text) # type: ignore[arg-type]
continue
@@ -1107,7 +1106,7 @@ def _prepare_messages(
additional_messages = []
additional_messages.append(
ThreadMessageOptions(
- role=MessageRole.AGENT if chat_message.role == Role.ASSISTANT else MessageRole.USER,
+ role=MessageRole.AGENT if chat_message.role == "assistant" else MessageRole.USER,
content=message_contents,
)
)
diff --git a/python/packages/azure-ai/agent_framework_azure_ai/_client.py b/python/packages/azure-ai/agent_framework_azure_ai/_client.py
index 202002a45f..15bcd7cfc9 100644
--- a/python/packages/azure-ai/agent_framework_azure_ai/_client.py
+++ b/python/packages/azure-ai/agent_framework_azure_ai/_client.py
@@ -482,7 +482,7 @@ def _prepare_messages_for_azure_ai(
# System/developer messages are turned into instructions, since there is no such message roles in Azure AI.
for message in messages:
- if message.role.value in ["system", "developer"]:
+ if message.role in ["system", "developer"]:
for text_content in [content for content in message.contents if content.type == "text"]:
instructions_list.append(text_content.text) # type: ignore[arg-type]
else:
diff --git a/python/packages/azure-ai/tests/test_azure_ai_agent_client.py b/python/packages/azure-ai/tests/test_azure_ai_agent_client.py
index 4366ea8141..76c1c75252 100644
--- a/python/packages/azure-ai/tests/test_azure_ai_agent_client.py
+++ b/python/packages/azure-ai/tests/test_azure_ai_agent_client.py
@@ -22,7 +22,6 @@
HostedFileSearchTool,
HostedMCPTool,
HostedWebSearchTool,
- Role,
tool,
)
from agent_framework._serialization import SerializationMixin
@@ -309,7 +308,7 @@ async def empty_async_iter():
mock_stream.__aenter__ = AsyncMock(return_value=empty_async_iter())
mock_stream.__aexit__ = AsyncMock(return_value=None)
- messages = [ChatMessage(role=Role.USER, text="Hello")]
+ messages = [ChatMessage("user", ["Hello"])]
# Call without existing thread - should create new one
response = chat_client.get_streaming_response(messages)
@@ -336,7 +335,7 @@ async def test_azure_ai_chat_client_prepare_options_basic(mock_agents_client: Ma
"""Test _prepare_options with basic ChatOptions."""
chat_client = create_test_azure_ai_chat_client(mock_agents_client)
- messages = [ChatMessage(role=Role.USER, text="Hello")]
+ messages = [ChatMessage("user", ["Hello"])]
chat_options: ChatOptions = {"max_tokens": 100, "temperature": 0.7}
run_options, tool_results = await chat_client._prepare_options(messages, chat_options) # type: ignore
@@ -349,7 +348,7 @@ async def test_azure_ai_chat_client_prepare_options_no_chat_options(mock_agents_
"""Test _prepare_options with default ChatOptions."""
chat_client = create_test_azure_ai_chat_client(mock_agents_client)
- messages = [ChatMessage(role=Role.USER, text="Hello")]
+ messages = [ChatMessage("user", ["Hello"])]
run_options, tool_results = await chat_client._prepare_options(messages, {}) # type: ignore
@@ -366,7 +365,7 @@ async def test_azure_ai_chat_client_prepare_options_with_image_content(mock_agen
mock_agents_client.get_agent = AsyncMock(return_value=None)
image_content = Content.from_uri(uri="https://example.com/image.jpg", media_type="image/jpeg")
- messages = [ChatMessage(role=Role.USER, contents=[image_content])]
+ messages = [ChatMessage("user", [image_content])]
run_options, _ = await chat_client._prepare_options(messages, {}) # type: ignore
@@ -455,8 +454,8 @@ async def test_azure_ai_chat_client_prepare_options_with_messages(mock_agents_cl
# Test with system message (becomes instruction)
messages = [
- ChatMessage(role=Role.SYSTEM, text="You are a helpful assistant"),
- ChatMessage(role=Role.USER, text="Hello"),
+ ChatMessage("system", ["You are a helpful assistant"]),
+ ChatMessage("user", ["Hello"]),
]
run_options, _ = await chat_client._prepare_options(messages, {}) # type: ignore
@@ -478,7 +477,7 @@ async def test_azure_ai_chat_client_prepare_options_with_instructions_from_optio
chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent")
mock_agents_client.get_agent = AsyncMock(return_value=None)
- messages = [ChatMessage(role=Role.USER, text="Hello")]
+ messages = [ChatMessage("user", ["Hello"])]
chat_options: ChatOptions = {
"instructions": "You are a thoughtful reviewer. Give brief feedback.",
}
@@ -501,8 +500,8 @@ async def test_azure_ai_chat_client_prepare_options_merges_instructions_from_mes
mock_agents_client.get_agent = AsyncMock(return_value=None)
messages = [
- ChatMessage(role=Role.SYSTEM, text="Context: You are reviewing marketing copy."),
- ChatMessage(role=Role.USER, text="Review this tagline"),
+ ChatMessage("system", ["Context: You are reviewing marketing copy."]),
+ ChatMessage("user", ["Review this tagline"]),
]
chat_options: ChatOptions = {
"instructions": "Be concise and constructive in your feedback.",
@@ -520,17 +519,17 @@ async def test_azure_ai_chat_client_prepare_options_merges_instructions_from_mes
async def test_azure_ai_chat_client_inner_get_response(mock_agents_client: MagicMock) -> None:
"""Test _inner_get_response method."""
chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent")
- messages = [ChatMessage(role=Role.USER, text="Hello")]
+ messages = [ChatMessage("user", ["Hello"])]
chat_options: ChatOptions = {}
async def mock_streaming_response():
- yield ChatResponseUpdate(role=Role.ASSISTANT, text="Hello back")
+ yield ChatResponseUpdate(role="assistant", text="Hello back")
with (
patch.object(chat_client, "_inner_get_streaming_response", return_value=mock_streaming_response()),
- patch("agent_framework.ChatResponse.from_chat_response_generator") as mock_from_generator,
+ patch("agent_framework.ChatResponse.from_update_generator") as mock_from_generator,
):
- mock_response = ChatResponse(role=Role.ASSISTANT, text="Hello back")
+ mock_response = ChatResponse(messages=[ChatMessage("assistant", ["Hello back"])])
mock_from_generator.return_value = mock_response
result = await chat_client._inner_get_response(messages=messages, options=chat_options) # type: ignore
@@ -673,7 +672,7 @@ async def test_azure_ai_chat_client_prepare_options_tool_choice_required_specifi
dict_tool = {"type": "function", "function": {"name": "test_function"}}
chat_options = {"tools": [dict_tool], "tool_choice": required_tool_mode}
- messages = [ChatMessage(role=Role.USER, text="Hello")]
+ messages = [ChatMessage("user", ["Hello"])]
run_options, _ = await chat_client._prepare_options(messages, chat_options) # type: ignore
@@ -718,7 +717,7 @@ async def test_azure_ai_chat_client_prepare_options_mcp_never_require(mock_agent
mcp_tool = HostedMCPTool(name="Test MCP Tool", url="https://example.com/mcp", approval_mode="never_require")
- messages = [ChatMessage(role=Role.USER, text="Hello")]
+ messages = [ChatMessage("user", ["Hello"])]
chat_options: ChatOptions = {"tools": [mcp_tool], "tool_choice": "auto"}
with patch("agent_framework_azure_ai._shared.McpTool") as mock_mcp_tool_class:
@@ -750,7 +749,7 @@ async def test_azure_ai_chat_client_prepare_options_mcp_with_headers(mock_agents
name="Test MCP Tool", url="https://example.com/mcp", headers=headers, approval_mode="never_require"
)
- messages = [ChatMessage(role=Role.USER, text="Hello")]
+ messages = [ChatMessage("user", ["Hello"])]
chat_options: ChatOptions = {"tools": [mcp_tool], "tool_choice": "auto"}
with patch("agent_framework_azure_ai._shared.McpTool") as mock_mcp_tool_class:
@@ -1409,7 +1408,7 @@ async def test_azure_ai_chat_client_get_response() -> None:
"It's a beautiful day for outdoor activities.",
)
)
- messages.append(ChatMessage(role="user", text="What's the weather like today?"))
+ messages.append(ChatMessage("user", ["What's the weather like today?"]))
# Test that the agents_client can be used to get a response
response = await azure_ai_chat_client.get_response(messages=messages)
@@ -1427,7 +1426,7 @@ async def test_azure_ai_chat_client_get_response_tools() -> None:
assert isinstance(azure_ai_chat_client, ChatClientProtocol)
messages: list[ChatMessage] = []
- messages.append(ChatMessage(role="user", text="What's the weather like in Seattle?"))
+ messages.append(ChatMessage("user", ["What's the weather like in Seattle?"]))
# Test that the agents_client can be used to get a response
response = await azure_ai_chat_client.get_response(
@@ -1455,7 +1454,7 @@ async def test_azure_ai_chat_client_streaming() -> None:
"It's a beautiful day for outdoor activities.",
)
)
- messages.append(ChatMessage(role="user", text="What's the weather like today?"))
+ messages.append(ChatMessage("user", ["What's the weather like today?"]))
# Test that the agents_client can be used to get a response
response = azure_ai_chat_client.get_streaming_response(messages=messages)
@@ -1479,7 +1478,7 @@ async def test_azure_ai_chat_client_streaming_tools() -> None:
assert isinstance(azure_ai_chat_client, ChatClientProtocol)
messages: list[ChatMessage] = []
- messages.append(ChatMessage(role="user", text="What's the weather like in Seattle?"))
+ messages.append(ChatMessage("user", ["What's the weather like in Seattle?"]))
# Test that the agents_client can be used to get a response
response = azure_ai_chat_client.get_streaming_response(
@@ -2098,7 +2097,7 @@ def test_azure_ai_chat_client_prepare_messages_with_function_result(
chat_client = create_test_azure_ai_chat_client(mock_agents_client)
function_result = Content.from_function_result(call_id='["run_123", "call_456"]', result="test result")
- messages = [ChatMessage(role=Role.USER, contents=[function_result])]
+ messages = [ChatMessage("user", [function_result])]
additional_messages, instructions, required_action_results = chat_client._prepare_messages(messages) # type: ignore
@@ -2118,7 +2117,7 @@ def test_azure_ai_chat_client_prepare_messages_with_raw_content_block(
# Create content with raw_representation that is a MessageInputContentBlock
raw_block = MessageInputTextBlock(text="Raw block text")
custom_content = Content(type="custom", raw_representation=raw_block)
- messages = [ChatMessage(role=Role.USER, contents=[custom_content])]
+ messages = [ChatMessage("user", [custom_content])]
additional_messages, instructions, required_action_results = chat_client._prepare_messages(messages) # type: ignore
diff --git a/python/packages/azure-ai/tests/test_azure_ai_client.py b/python/packages/azure-ai/tests/test_azure_ai_client.py
index 694bcb6604..8563d78cbf 100644
--- a/python/packages/azure-ai/tests/test_azure_ai_client.py
+++ b/python/packages/azure-ai/tests/test_azure_ai_client.py
@@ -22,7 +22,6 @@
HostedFileSearchTool,
HostedMCPTool,
HostedWebSearchTool,
- Role,
tool,
)
from agent_framework.exceptions import ServiceInitializationError
@@ -299,16 +298,16 @@ async def test_prepare_messages_for_azure_ai_with_system_messages(
client = create_test_azure_ai_client(mock_project_client)
messages = [
- ChatMessage(role=Role.SYSTEM, contents=[Content.from_text(text="You are a helpful assistant.")]),
- ChatMessage(role=Role.USER, contents=[Content.from_text(text="Hello")]),
- ChatMessage(role=Role.ASSISTANT, contents=[Content.from_text(text="System response")]),
+ ChatMessage("system", [Content.from_text(text="You are a helpful assistant.")]),
+ ChatMessage("user", [Content.from_text(text="Hello")]),
+ ChatMessage("assistant", [Content.from_text(text="System response")]),
]
result_messages, instructions = client._prepare_messages_for_azure_ai(messages) # type: ignore
assert len(result_messages) == 2
- assert result_messages[0].role == Role.USER
- assert result_messages[1].role == Role.ASSISTANT
+ assert result_messages[0].role == "user"
+ assert result_messages[1].role == "assistant"
assert instructions == "You are a helpful assistant."
@@ -319,8 +318,8 @@ async def test_prepare_messages_for_azure_ai_no_system_messages(
client = create_test_azure_ai_client(mock_project_client)
messages = [
- ChatMessage(role=Role.USER, contents=[Content.from_text(text="Hello")]),
- ChatMessage(role=Role.ASSISTANT, contents=[Content.from_text(text="Hi there!")]),
+ ChatMessage("user", [Content.from_text(text="Hello")]),
+ ChatMessage("assistant", [Content.from_text(text="Hi there!")]),
]
result_messages, instructions = client._prepare_messages_for_azure_ai(messages) # type: ignore
@@ -420,7 +419,7 @@ async def test_prepare_options_basic(mock_project_client: MagicMock) -> None:
"""Test prepare_options basic functionality."""
client = create_test_azure_ai_client(mock_project_client, agent_name="test-agent", agent_version="1.0")
- messages = [ChatMessage(role=Role.USER, contents=[Content.from_text(text="Hello")])]
+ messages = [ChatMessage("user", [Content.from_text(text="Hello")])]
with (
patch.object(client.__class__.__bases__[0], "_prepare_options", return_value={"model": "test-model"}),
@@ -454,7 +453,7 @@ async def test_prepare_options_with_application_endpoint(
agent_version="1",
)
- messages = [ChatMessage(role=Role.USER, contents=[Content.from_text(text="Hello")])]
+ messages = [ChatMessage("user", [Content.from_text(text="Hello")])]
with (
patch.object(client.__class__.__bases__[0], "_prepare_options", return_value={"model": "test-model"}),
@@ -493,7 +492,7 @@ async def test_prepare_options_with_application_project_client(
agent_version="1",
)
- messages = [ChatMessage(role=Role.USER, contents=[Content.from_text(text="Hello")])]
+ messages = [ChatMessage("user", [Content.from_text(text="Hello")])]
with (
patch.object(client.__class__.__bases__[0], "_prepare_options", return_value={"model": "test-model"}),
@@ -969,7 +968,7 @@ async def test_prepare_options_excludes_response_format(
"""Test that prepare_options excludes response_format, text, and text_format from final run options."""
client = create_test_azure_ai_client(mock_project_client, agent_name="test-agent", agent_version="1.0")
- messages = [ChatMessage(role=Role.USER, contents=[Content.from_text(text="Hello")])]
+ messages = [ChatMessage("user", [Content.from_text(text="Hello")])]
chat_options: ChatOptions = {}
with (
@@ -1355,10 +1354,10 @@ async def test_integration_options(
# Prepare test message
if option_name.startswith("tool_choice"):
# Use weather-related prompt for tool tests
- messages = [ChatMessage(role="user", text="What is the weather in Seattle?")]
+ messages = [ChatMessage("user", ["What is the weather in Seattle?"])]
else:
# Generic prompt for simple options
- messages = [ChatMessage(role="user", text="Say 'Hello World' briefly.")]
+ messages = [ChatMessage("user", ["Say 'Hello World' briefly."])]
# Build options dict
options: dict[str, Any] = {option_name: option_value, "tools": [get_weather]}
@@ -1372,7 +1371,7 @@ async def test_integration_options(
)
output_format = option_value if option_name == "response_format" else None
- response = await ChatResponse.from_chat_response_generator(response_gen, output_format_type=output_format)
+ response = await ChatResponse.from_update_generator(response_gen, output_format_type=output_format)
else:
# Test non-streaming mode
response = await client.get_response(
@@ -1458,11 +1457,11 @@ async def test_integration_agent_options(
# Prepare test message
if option_name.startswith("response_format"):
# Use prompt that works well with structured output
- messages = [ChatMessage(role="user", text="The weather in Seattle is sunny")]
- messages.append(ChatMessage(role="user", text="What is the weather in Seattle?"))
+ messages = [ChatMessage("user", ["The weather in Seattle is sunny"])]
+ messages.append(ChatMessage("user", ["What is the weather in Seattle?"]))
else:
# Generic prompt for simple options
- messages = [ChatMessage(role="user", text="Say 'Hello World' briefly.")]
+ messages = [ChatMessage("user", ["Say 'Hello World' briefly."])]
# Build options dict
options = {option_name: option_value}
@@ -1475,9 +1474,7 @@ async def test_integration_agent_options(
)
output_format = option_value if option_name.startswith("response_format") else None
- response = await ChatResponse.from_chat_response_generator(
- response_gen, output_format_type=output_format
- )
+ response = await ChatResponse.from_update_generator(response_gen, output_format_type=output_format)
else:
# Test non-streaming mode
response = await client.get_response(
@@ -1519,7 +1516,7 @@ async def test_integration_web_search() -> None:
},
}
if streaming:
- response = await ChatResponse.from_chat_response_generator(client.get_streaming_response(**content))
+ response = await ChatResponse.from_update_generator(client.get_streaming_response(**content))
else:
response = await client.get_response(**content)
@@ -1544,7 +1541,7 @@ async def test_integration_web_search() -> None:
},
}
if streaming:
- response = await ChatResponse.from_chat_response_generator(client.get_streaming_response(**content))
+ response = await ChatResponse.from_update_generator(client.get_streaming_response(**content))
else:
response = await client.get_response(**content)
assert response.text is not None
diff --git a/python/packages/azurefunctions/tests/test_app.py b/python/packages/azurefunctions/tests/test_app.py
index f8b414fc34..d33ca1f99c 100644
--- a/python/packages/azurefunctions/tests/test_app.py
+++ b/python/packages/azurefunctions/tests/test_app.py
@@ -355,9 +355,7 @@ class TestAgentEntityOperations:
async def test_entity_run_agent_operation(self) -> None:
"""Test that entity can run agent operation."""
mock_agent = Mock()
- mock_agent.run = AsyncMock(
- return_value=AgentResponse(messages=[ChatMessage(role="assistant", text="Test response")])
- )
+ mock_agent.run = AsyncMock(return_value=AgentResponse(messages=[ChatMessage("assistant", ["Test response"])]))
entity = AgentEntity(mock_agent, state_provider=_InMemoryStateProvider(thread_id="test-conv-123"))
@@ -373,9 +371,7 @@ async def test_entity_run_agent_operation(self) -> None:
async def test_entity_stores_conversation_history(self) -> None:
"""Test that the entity stores conversation history."""
mock_agent = Mock()
- mock_agent.run = AsyncMock(
- return_value=AgentResponse(messages=[ChatMessage(role="assistant", text="Response 1")])
- )
+ mock_agent.run = AsyncMock(return_value=AgentResponse(messages=[ChatMessage("assistant", ["Response 1"])]))
entity = AgentEntity(mock_agent, state_provider=_InMemoryStateProvider(thread_id="conv-1"))
@@ -407,9 +403,7 @@ async def test_entity_stores_conversation_history(self) -> None:
async def test_entity_increments_message_count(self) -> None:
"""Test that the entity increments the message count."""
mock_agent = Mock()
- mock_agent.run = AsyncMock(
- return_value=AgentResponse(messages=[ChatMessage(role="assistant", text="Response")])
- )
+ mock_agent.run = AsyncMock(return_value=AgentResponse(messages=[ChatMessage("assistant", ["Response"])]))
entity = AgentEntity(mock_agent, state_provider=_InMemoryStateProvider(thread_id="conv-1"))
@@ -448,9 +442,7 @@ def test_create_agent_entity_returns_function(self) -> None:
def test_entity_function_handles_run_operation(self) -> None:
"""Test that the entity function handles the run operation."""
mock_agent = Mock()
- mock_agent.run = AsyncMock(
- return_value=AgentResponse(messages=[ChatMessage(role="assistant", text="Response")])
- )
+ mock_agent.run = AsyncMock(return_value=AgentResponse(messages=[ChatMessage("assistant", ["Response"])]))
entity_function = create_agent_entity(mock_agent)
@@ -475,9 +467,7 @@ def test_entity_function_handles_run_operation(self) -> None:
def test_entity_function_handles_run_agent_operation(self) -> None:
"""Test that the entity function handles the deprecated run_agent operation for backward compatibility."""
mock_agent = Mock()
- mock_agent.run = AsyncMock(
- return_value=AgentResponse(messages=[ChatMessage(role="assistant", text="Response")])
- )
+ mock_agent.run = AsyncMock(return_value=AgentResponse(messages=[ChatMessage("assistant", ["Response"])]))
entity_function = create_agent_entity(mock_agent)
diff --git a/python/packages/azurefunctions/tests/test_entities.py b/python/packages/azurefunctions/tests/test_entities.py
index 555b588887..909dedd6f8 100644
--- a/python/packages/azurefunctions/tests/test_entities.py
+++ b/python/packages/azurefunctions/tests/test_entities.py
@@ -10,7 +10,7 @@
from unittest.mock import AsyncMock, Mock
import pytest
-from agent_framework import AgentResponse, ChatMessage, Role
+from agent_framework import AgentResponse, ChatMessage
from agent_framework_azurefunctions._entities import create_agent_entity
@@ -19,11 +19,7 @@
def _agent_response(text: str | None) -> AgentResponse:
"""Create an AgentResponse with a single assistant message."""
- message = (
- ChatMessage(role=Role.ASSISTANT, text=text)
- if text is not None
- else ChatMessage(role=Role.ASSISTANT, contents=[])
- )
+ message = ChatMessage("assistant", [text]) if text is not None else ChatMessage("assistant", [])
return AgentResponse(messages=[message])
diff --git a/python/packages/azurefunctions/tests/test_orchestration.py b/python/packages/azurefunctions/tests/test_orchestration.py
index 2b9a4126d4..1f8a029dba 100644
--- a/python/packages/azurefunctions/tests/test_orchestration.py
+++ b/python/packages/azurefunctions/tests/test_orchestration.py
@@ -6,7 +6,7 @@
from unittest.mock import Mock
import pytest
-from agent_framework import AgentResponse, ChatMessage, Role
+from agent_framework import AgentResponse, ChatMessage
from agent_framework_durabletask import DurableAIAgent
from azure.durable_functions.models.Task import TaskBase, TaskState
@@ -136,7 +136,7 @@ def test_try_set_value_success(self) -> None:
# Simulate successful entity task completion
entity_task.state = TaskState.SUCCEEDED
- entity_task.result = AgentResponse(messages=[ChatMessage(role="assistant", text="Test response")]).to_dict()
+ entity_task.result = AgentResponse(messages=[ChatMessage("assistant", ["Test response"])]).to_dict()
# Clear pending_tasks to simulate that parent has processed the child
task.pending_tasks.clear()
@@ -178,7 +178,7 @@ class TestSchema(BaseModel):
# Simulate successful entity task with JSON response
entity_task.state = TaskState.SUCCEEDED
- entity_task.result = AgentResponse(messages=[ChatMessage(role="assistant", text='{"answer": "42"}')]).to_dict()
+ entity_task.result = AgentResponse(messages=[ChatMessage("assistant", ['{"answer": "42"}'])]).to_dict()
# Clear pending_tasks to simulate that parent has processed the child
task.pending_tasks.clear()
@@ -254,7 +254,7 @@ def test_fire_and_forget_returns_acceptance_response(self, executor_with_uuid: t
response = result.result
assert isinstance(response, AgentResponse)
assert len(response.messages) == 1
- assert response.messages[0].role == Role.SYSTEM
+ assert response.messages[0].role == "system"
# Check message contains key information
message_text = response.messages[0].text
assert "accepted" in message_text.lower()
diff --git a/python/packages/bedrock/agent_framework_bedrock/_chat_client.py b/python/packages/bedrock/agent_framework_bedrock/_chat_client.py
index d7e0754c2b..bc67bc7908 100644
--- a/python/packages/bedrock/agent_framework_bedrock/_chat_client.py
+++ b/python/packages/bedrock/agent_framework_bedrock/_chat_client.py
@@ -16,9 +16,7 @@
ChatResponse,
ChatResponseUpdate,
Content,
- FinishReason,
FunctionTool,
- Role,
ToolProtocol,
UsageDetails,
get_logger,
@@ -185,20 +183,20 @@ class BedrockChatOptions(ChatOptions[TResponseModel], Generic[TResponseModel], t
# endregion
-ROLE_MAP: dict[Role, str] = {
- Role.USER: "user",
- Role.ASSISTANT: "assistant",
- Role.SYSTEM: "user",
- Role.TOOL: "user",
+ROLE_MAP: dict[str, str] = {
+ "user": "user",
+ "assistant": "assistant",
+ "system": "user",
+ "tool": "user",
}
-FINISH_REASON_MAP: dict[str, FinishReason] = {
- "end_turn": FinishReason.STOP,
- "stop_sequence": FinishReason.STOP,
- "max_tokens": FinishReason.LENGTH,
- "length": FinishReason.LENGTH,
- "content_filtered": FinishReason.CONTENT_FILTER,
- "tool_use": FinishReason.TOOL_CALLS,
+FINISH_REASON_MAP: dict[str, str] = {
+ "end_turn": "stop",
+ "stop_sequence": "stop",
+ "max_tokens": "length",
+ "length": "length",
+ "content_filtered": "content_filter",
+ "tool_use": "tool_calls",
}
@@ -397,7 +395,7 @@ def _prepare_bedrock_messages(
conversation: list[dict[str, Any]] = []
pending_tool_use_ids: deque[str] = deque()
for message in messages:
- if message.role == Role.SYSTEM:
+ if message.role == "system":
text_value = message.text
if text_value:
prompts.append({"text": text_value})
@@ -414,7 +412,7 @@ def _prepare_bedrock_messages(
for block in content_blocks
if isinstance(block, MutableMapping) and "toolUse" in block
)
- elif message.role == Role.TOOL:
+ elif message.role == "tool":
content_blocks = self._align_tool_results_with_pending(content_blocks, pending_tool_use_ids)
pending_tool_use_ids.clear()
if not content_blocks:
@@ -574,7 +572,7 @@ def _process_converse_response(self, response: dict[str, Any]) -> ChatResponse:
message = output.get("message", {})
content_blocks = message.get("content", []) or []
contents = self._parse_message_contents(content_blocks)
- chat_message = ChatMessage(role=Role.ASSISTANT, contents=contents, raw_representation=message)
+ chat_message = ChatMessage("assistant", contents, raw_representation=message)
usage_details = self._parse_usage(response.get("usage") or output.get("usage"))
finish_reason = self._map_finish_reason(output.get("completionReason") or response.get("stopReason"))
response_id = response.get("responseId") or message.get("id")
@@ -642,7 +640,7 @@ def _parse_message_contents(self, content_blocks: Sequence[MutableMapping[str, A
logger.debug("Ignoring unsupported Bedrock content block: %s", block)
return contents
- def _map_finish_reason(self, reason: str | None) -> FinishReason | None:
+ def _map_finish_reason(self, reason: str | None) -> str | None:
if not reason:
return None
return FINISH_REASON_MAP.get(reason.lower())
diff --git a/python/packages/bedrock/tests/test_bedrock_client.py b/python/packages/bedrock/tests/test_bedrock_client.py
index 704eb2138a..7addad3b73 100644
--- a/python/packages/bedrock/tests/test_bedrock_client.py
+++ b/python/packages/bedrock/tests/test_bedrock_client.py
@@ -6,7 +6,7 @@
from typing import Any
import pytest
-from agent_framework import ChatMessage, Content, Role
+from agent_framework import ChatMessage, Content
from agent_framework.exceptions import ServiceInitializationError
from agent_framework_bedrock import BedrockChatClient
@@ -42,8 +42,8 @@ def test_get_response_invokes_bedrock_runtime() -> None:
)
messages = [
- ChatMessage(role=Role.SYSTEM, contents=[Content.from_text(text="You are concise.")]),
- ChatMessage(role=Role.USER, contents=[Content.from_text(text="hello")]),
+ ChatMessage("system", [Content.from_text(text="You are concise.")]),
+ ChatMessage("user", [Content.from_text(text="hello")]),
]
response = asyncio.run(client.get_response(messages=messages, options={"max_tokens": 32}))
@@ -63,7 +63,7 @@ def test_build_request_requires_non_system_messages() -> None:
client=_StubBedrockRuntime(),
)
- messages = [ChatMessage(role=Role.SYSTEM, contents=[Content.from_text(text="Only system text")])]
+ messages = [ChatMessage("system", [Content.from_text(text="Only system text")])]
with pytest.raises(ServiceInitializationError):
client._prepare_options(messages, {})
diff --git a/python/packages/bedrock/tests/test_bedrock_settings.py b/python/packages/bedrock/tests/test_bedrock_settings.py
index d98cf00817..124892e51d 100644
--- a/python/packages/bedrock/tests/test_bedrock_settings.py
+++ b/python/packages/bedrock/tests/test_bedrock_settings.py
@@ -10,7 +10,6 @@
ChatOptions,
Content,
FunctionTool,
- Role,
)
from pydantic import BaseModel
@@ -47,7 +46,7 @@ def test_build_request_includes_tool_config() -> None:
"tools": [tool],
"tool_choice": {"mode": "required", "required_function_name": "get_weather"},
}
- messages = [ChatMessage(role=Role.USER, contents=[Content.from_text(text="hi")])]
+ messages = [ChatMessage("user", [Content.from_text(text="hi")])]
request = client._prepare_options(messages, options)
@@ -59,15 +58,15 @@ def test_build_request_serializes_tool_history() -> None:
client = _build_client()
options: ChatOptions = {}
messages = [
- ChatMessage(role=Role.USER, contents=[Content.from_text(text="how's weather?")]),
+ ChatMessage("user", [Content.from_text(text="how's weather?")]),
ChatMessage(
- role=Role.ASSISTANT,
+ role="assistant",
contents=[
Content.from_function_call(call_id="call-1", name="get_weather", arguments='{"location": "SEA"}')
],
),
ChatMessage(
- role=Role.TOOL,
+ role="tool",
contents=[Content.from_function_result(call_id="call-1", result={"answer": "72F"})],
),
]
diff --git a/python/packages/chatkit/agent_framework_chatkit/_converter.py b/python/packages/chatkit/agent_framework_chatkit/_converter.py
index 894d54831d..457cfc5e1e 100644
--- a/python/packages/chatkit/agent_framework_chatkit/_converter.py
+++ b/python/packages/chatkit/agent_framework_chatkit/_converter.py
@@ -9,7 +9,6 @@
from agent_framework import (
ChatMessage,
Content,
- Role,
)
from chatkit.types import (
AssistantMessageItem,
@@ -101,21 +100,21 @@ async def user_message_to_input(
# If only text and no attachments, use text parameter for simplicity
if text_content.strip() and not data_contents:
- user_message = ChatMessage(role=Role.USER, text=text_content.strip())
+ user_message = ChatMessage("user", [text_content.strip()])
else:
# Build contents list with both text and attachments
contents: list[Content] = []
if text_content.strip():
contents.append(Content.from_text(text=text_content.strip()))
contents.extend(data_contents)
- user_message = ChatMessage(role=Role.USER, contents=contents)
+ user_message = ChatMessage("user", contents)
# Handle quoted text if this is the last message
messages = [user_message]
if item.quoted_text and is_last_message:
quoted_context = ChatMessage(
- role=Role.USER,
- text=f"The user is referring to this in particular:\n{item.quoted_text}",
+ "user",
+ [f"The user is referring to this in particular:\n{item.quoted_text}"],
)
# Prepend quoted context before the main message
messages.insert(0, quoted_context)
@@ -214,7 +213,7 @@ def hidden_context_to_input(
message = converter.hidden_context_to_input(hidden_item)
# Returns: ChatMessage(role=SYSTEM, text="User's email: ...")
"""
- return ChatMessage(role=Role.SYSTEM, text=f"{item.content}")
+ return ChatMessage("system", [f"{item.content}"])
def tag_to_message_content(self, tag: UserMessageTagContent) -> Content:
"""Convert a ChatKit tag (@-mention) to Agent Framework content.
@@ -293,7 +292,7 @@ def task_to_input(self, item: TaskItem) -> ChatMessage | list[ChatMessage] | Non
f"A message was displayed to the user that the following task was performed:\n\n{task_text}\n"
)
- return ChatMessage(role=Role.USER, text=text)
+ return ChatMessage("user", [text])
def workflow_to_input(self, item: WorkflowItem) -> ChatMessage | list[ChatMessage] | None:
"""Convert a ChatKit WorkflowItem to Agent Framework ChatMessage(s).
@@ -348,7 +347,7 @@ def workflow_to_input(self, item: WorkflowItem) -> ChatMessage | list[ChatMessag
f"\n{task_text}\n"
)
- messages.append(ChatMessage(role=Role.USER, text=text))
+ messages.append(ChatMessage("user", [text]))
return messages if messages else None
@@ -390,7 +389,7 @@ def widget_to_input(self, item: WidgetItem) -> ChatMessage | list[ChatMessage] |
try:
widget_json = item.widget.model_dump_json(exclude_unset=True, exclude_none=True)
text = f"The following graphical UI widget (id: {item.id}) was displayed to the user:{widget_json}"
- return ChatMessage(role=Role.USER, text=text)
+ return ChatMessage("user", [text])
except Exception:
# If JSON serialization fails, skip the widget
return None
@@ -416,7 +415,7 @@ async def assistant_message_to_input(self, item: AssistantMessageItem) -> ChatMe
if not text_parts:
return None
- return ChatMessage(role=Role.ASSISTANT, text="".join(text_parts))
+ return ChatMessage("assistant", ["".join(text_parts)])
async def client_tool_call_to_input(self, item: ClientToolCallItem) -> ChatMessage | list[ChatMessage] | None:
"""Convert a ChatKit ClientToolCallItem to Agent Framework ChatMessage(s).
@@ -442,7 +441,7 @@ async def client_tool_call_to_input(self, item: ClientToolCallItem) -> ChatMessa
# Create function call message
function_call_msg = ChatMessage(
- role=Role.ASSISTANT,
+ role="assistant",
contents=[
Content.from_function_call(
call_id=item.call_id,
@@ -454,7 +453,7 @@ async def client_tool_call_to_input(self, item: ClientToolCallItem) -> ChatMessa
# Create function result message
function_result_msg = ChatMessage(
- role=Role.TOOL,
+ role="tool",
contents=[
Content.from_function_result(
call_id=item.call_id,
diff --git a/python/packages/chatkit/tests/test_converter.py b/python/packages/chatkit/tests/test_converter.py
index b75139bf58..71400527aa 100644
--- a/python/packages/chatkit/tests/test_converter.py
+++ b/python/packages/chatkit/tests/test_converter.py
@@ -5,7 +5,7 @@
from unittest.mock import Mock
import pytest
-from agent_framework import ChatMessage, Role
+from agent_framework import ChatMessage
from chatkit.types import UserMessageTextContent
from agent_framework_chatkit import ThreadItemConverter, simple_to_agent_input
@@ -44,7 +44,7 @@ async def test_to_agent_input_with_text(self, converter):
assert len(result) == 1
assert isinstance(result[0], ChatMessage)
- assert result[0].role == Role.USER
+ assert result[0].role == "user"
assert result[0].text == "Hello, how can you help me?"
async def test_to_agent_input_empty_text(self, converter):
@@ -117,7 +117,7 @@ def test_hidden_context_to_input(self, converter):
result = converter.hidden_context_to_input(hidden_item)
assert isinstance(result, ChatMessage)
- assert result.role == Role.SYSTEM
+ assert result.role == "system"
assert result.text == "This is hidden context information"
def test_tag_to_message_content(self, converter):
@@ -234,7 +234,7 @@ async def test_to_agent_input_with_image_attachment(self):
assert len(result) == 1
message = result[0]
- assert message.role == Role.USER
+ assert message.role == "user"
assert len(message.contents) == 2
# First content should be text
@@ -303,7 +303,7 @@ def test_task_to_input(self, converter):
result = converter.task_to_input(task_item)
assert isinstance(result, ChatMessage)
- assert result.role == Role.USER
+ assert result.role == "user"
assert "Analysis: Analyzed the data" in result.text
assert "" in result.text
@@ -385,7 +385,7 @@ def test_widget_to_input(self, converter):
result = converter.widget_to_input(widget_item)
assert isinstance(result, ChatMessage)
- assert result.role == Role.USER
+ assert result.role == "user"
assert "widget_1" in result.text
assert "graphical UI widget" in result.text
@@ -418,5 +418,5 @@ async def test_simple_to_agent_input_with_text(self):
assert len(result) == 1
assert isinstance(result[0], ChatMessage)
- assert result[0].role == Role.USER
+ assert result[0].role == "user"
assert result[0].text == "Test message"
diff --git a/python/packages/chatkit/tests/test_streaming.py b/python/packages/chatkit/tests/test_streaming.py
index ff552d79e8..c26a9cb7ac 100644
--- a/python/packages/chatkit/tests/test_streaming.py
+++ b/python/packages/chatkit/tests/test_streaming.py
@@ -4,7 +4,7 @@
from unittest.mock import Mock
-from agent_framework import AgentResponseUpdate, Content, Role
+from agent_framework import AgentResponseUpdate, Content
from chatkit.types import (
ThreadItemAddedEvent,
ThreadItemDoneEvent,
@@ -34,7 +34,7 @@ async def test_stream_single_text_update(self):
"""Test streaming single text update."""
async def single_update_stream():
- yield AgentResponseUpdate(role=Role.ASSISTANT, contents=[Content.from_text(text="Hello world")])
+ yield AgentResponseUpdate(role="assistant", contents=[Content.from_text(text="Hello world")])
events = []
async for event in stream_agent_response(single_update_stream(), thread_id="test_thread"):
@@ -59,8 +59,8 @@ async def test_stream_multiple_text_updates(self):
"""Test streaming multiple text updates."""
async def multiple_updates_stream():
- yield AgentResponseUpdate(role=Role.ASSISTANT, contents=[Content.from_text(text="Hello ")])
- yield AgentResponseUpdate(role=Role.ASSISTANT, contents=[Content.from_text(text="world!")])
+ yield AgentResponseUpdate(role="assistant", contents=[Content.from_text(text="Hello ")])
+ yield AgentResponseUpdate(role="assistant", contents=[Content.from_text(text="world!")])
events = []
async for event in stream_agent_response(multiple_updates_stream(), thread_id="test_thread"):
@@ -91,7 +91,7 @@ def custom_id_generator(item_type: str) -> str:
return f"custom_{item_type}_123"
async def single_update_stream():
- yield AgentResponseUpdate(role=Role.ASSISTANT, contents=[Content.from_text(text="Test")])
+ yield AgentResponseUpdate(role="assistant", contents=[Content.from_text(text="Test")])
events = []
async for event in stream_agent_response(
@@ -107,8 +107,8 @@ async def test_stream_empty_content_updates(self):
"""Test streaming updates with empty content."""
async def empty_content_stream():
- yield AgentResponseUpdate(role=Role.ASSISTANT, contents=[])
- yield AgentResponseUpdate(role=Role.ASSISTANT, contents=None)
+ yield AgentResponseUpdate(role="assistant", contents=[])
+ yield AgentResponseUpdate(role="assistant", contents=None)
events = []
async for event in stream_agent_response(empty_content_stream(), thread_id="test_thread"):
@@ -131,7 +131,7 @@ async def test_stream_non_text_content(self):
non_text_content.text = None
async def non_text_stream():
- yield AgentResponseUpdate(role=Role.ASSISTANT, contents=[non_text_content])
+ yield AgentResponseUpdate(role="assistant", contents=[non_text_content])
events = []
async for event in stream_agent_response(non_text_stream(), thread_id="test_thread"):
diff --git a/python/packages/claude/agent_framework_claude/_agent.py b/python/packages/claude/agent_framework_claude/_agent.py
index f8f3796656..f4439df851 100644
--- a/python/packages/claude/agent_framework_claude/_agent.py
+++ b/python/packages/claude/agent_framework_claude/_agent.py
@@ -16,7 +16,6 @@
Content,
ContextProvider,
FunctionTool,
- Role,
ToolProtocol,
get_logger,
normalize_messages,
@@ -628,7 +627,7 @@ async def run_stream(
text = delta.get("text", "")
if text:
yield AgentResponseUpdate(
- role=Role.ASSISTANT,
+ role="assistant",
contents=[Content.from_text(text=text, raw_representation=message)],
raw_representation=message,
)
@@ -636,7 +635,7 @@ async def run_stream(
thinking = delta.get("thinking", "")
if thinking:
yield AgentResponseUpdate(
- role=Role.ASSISTANT,
+ role="assistant",
contents=[Content.from_text_reasoning(text=thinking, raw_representation=message)],
raw_representation=message,
)
diff --git a/python/packages/claude/tests/test_claude_agent.py b/python/packages/claude/tests/test_claude_agent.py
index 15fc0b8090..d54489cd0d 100644
--- a/python/packages/claude/tests/test_claude_agent.py
+++ b/python/packages/claude/tests/test_claude_agent.py
@@ -4,7 +4,7 @@
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
-from agent_framework import AgentResponseUpdate, AgentThread, ChatMessage, Content, Role, tool
+from agent_framework import AgentResponseUpdate, AgentThread, ChatMessage, Content, tool
from agent_framework_claude import ClaudeAgent, ClaudeAgentOptions, ClaudeAgentSettings
from agent_framework_claude._agent import TOOLS_MCP_SERVER_NAME
@@ -375,7 +375,7 @@ async def test_run_stream_yields_updates(self) -> None:
updates.append(update)
# StreamEvent yields text deltas
assert len(updates) == 2
- assert updates[0].role == Role.ASSISTANT
+ assert updates[0].role == "assistant"
assert updates[0].text == "Streaming "
assert updates[1].text == "response"
@@ -632,7 +632,7 @@ def test_format_user_message(self) -> None:
"""Test formatting user message."""
agent = ClaudeAgent()
msg = ChatMessage(
- role=Role.USER,
+ role="user",
contents=[Content.from_text(text="Hello")],
)
result = agent._format_prompt([msg]) # type: ignore[reportPrivateUsage]
@@ -642,9 +642,9 @@ def test_format_multiple_messages(self) -> None:
"""Test formatting multiple messages."""
agent = ClaudeAgent()
messages = [
- ChatMessage(role=Role.USER, contents=[Content.from_text(text="Hi")]),
- ChatMessage(role=Role.ASSISTANT, contents=[Content.from_text(text="Hello!")]),
- ChatMessage(role=Role.USER, contents=[Content.from_text(text="How are you?")]),
+ ChatMessage("user", [Content.from_text(text="Hi")]),
+ ChatMessage("assistant", [Content.from_text(text="Hello!")]),
+ ChatMessage("user", [Content.from_text(text="How are you?")]),
]
result = agent._format_prompt(messages) # type: ignore[reportPrivateUsage]
assert "Hi" in result
diff --git a/python/packages/copilotstudio/agent_framework_copilotstudio/_agent.py b/python/packages/copilotstudio/agent_framework_copilotstudio/_agent.py
index 98d5a2b475..6d764bf68a 100644
--- a/python/packages/copilotstudio/agent_framework_copilotstudio/_agent.py
+++ b/python/packages/copilotstudio/agent_framework_copilotstudio/_agent.py
@@ -12,7 +12,6 @@
ChatMessage,
Content,
ContextProvider,
- Role,
normalize_messages,
)
from agent_framework._pydantic import AFBaseSettings
@@ -331,7 +330,7 @@ async def _process_activities(self, activities: AsyncIterable[Any], streaming: b
(activity.type == "message" and not streaming) or (activity.type == "typing" and streaming)
):
yield ChatMessage(
- role=Role.ASSISTANT,
+ role="assistant",
contents=[Content.from_text(activity.text)],
author_name=activity.from_property.name if activity.from_property else None,
message_id=activity.id,
diff --git a/python/packages/copilotstudio/tests/test_copilot_agent.py b/python/packages/copilotstudio/tests/test_copilot_agent.py
index c4e2ff3e08..4f3edbbbfd 100644
--- a/python/packages/copilotstudio/tests/test_copilot_agent.py
+++ b/python/packages/copilotstudio/tests/test_copilot_agent.py
@@ -4,7 +4,7 @@
from unittest.mock import MagicMock, patch
import pytest
-from agent_framework import AgentResponse, AgentResponseUpdate, AgentThread, ChatMessage, Content, Role
+from agent_framework import AgentResponse, AgentResponseUpdate, AgentThread, ChatMessage, Content
from agent_framework.exceptions import ServiceException, ServiceInitializationError
from microsoft_agents.copilotstudio.client import CopilotClient
@@ -131,7 +131,7 @@ async def test_run_with_string_message(self, mock_copilot_client: MagicMock, moc
content = response.messages[0].contents[0]
assert content.type == "text"
assert content.text == "Test response"
- assert response.messages[0].role == Role.ASSISTANT
+ assert response.messages[0].role == "assistant"
async def test_run_with_chat_message(self, mock_copilot_client: MagicMock, mock_activity: MagicMock) -> None:
"""Test run method with ChatMessage."""
@@ -143,7 +143,7 @@ async def test_run_with_chat_message(self, mock_copilot_client: MagicMock, mock_
mock_copilot_client.start_conversation.return_value = create_async_generator([conversation_activity])
mock_copilot_client.ask_question.return_value = create_async_generator([mock_activity])
- chat_message = ChatMessage(role=Role.USER, contents=[Content.from_text("test message")])
+ chat_message = ChatMessage("user", [Content.from_text("test message")])
response = await agent.run(chat_message)
assert isinstance(response, AgentResponse)
@@ -151,7 +151,7 @@ async def test_run_with_chat_message(self, mock_copilot_client: MagicMock, mock_
content = response.messages[0].contents[0]
assert content.type == "text"
assert content.text == "Test response"
- assert response.messages[0].role == Role.ASSISTANT
+ assert response.messages[0].role == "assistant"
async def test_run_with_thread(self, mock_copilot_client: MagicMock, mock_activity: MagicMock) -> None:
"""Test run method with existing thread."""
diff --git a/python/packages/core/README.md b/python/packages/core/README.md
index 4113eca061..30ff1b7aa4 100644
--- a/python/packages/core/README.md
+++ b/python/packages/core/README.md
@@ -96,8 +96,8 @@ async def main():
client = OpenAIChatClient()
messages = [
- ChatMessage(role=Role.SYSTEM, text="You are a helpful assistant."),
- ChatMessage(role=Role.USER, text="Write a haiku about Agent Framework.")
+ ChatMessage("system", ["You are a helpful assistant."]),
+ ChatMessage("user", ["Write a haiku about Agent Framework."])
]
response = await client.get_response(messages)
diff --git a/python/packages/core/agent_framework/_agents.py b/python/packages/core/agent_framework/_agents.py
index 4dc6df2eac..5c36d937fa 100644
--- a/python/packages/core/agent_framework/_agents.py
+++ b/python/packages/core/agent_framework/_agents.py
@@ -38,6 +38,7 @@
ChatMessage,
ChatResponse,
ChatResponseUpdate,
+ Content,
normalize_messages,
)
from .exceptions import AgentExecutionException, AgentInitializationError
@@ -209,7 +210,7 @@ def get_new_thread(self, **kwargs):
async def run(
self,
- messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None,
+ messages: str | Content | ChatMessage | Sequence[str | Content | ChatMessage] | None = None,
*,
thread: AgentThread | None = None,
**kwargs: Any,
@@ -240,7 +241,7 @@ async def run(
def run_stream(
self,
- messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None,
+ messages: str | Content | ChatMessage | Sequence[str | Content | ChatMessage] | None = None,
*,
thread: AgentThread | None = None,
**kwargs: Any,
@@ -490,7 +491,7 @@ async def agent_wrapper(**kwargs: Any) -> str:
stream_callback(update)
# Create final text from accumulated updates
- return AgentResponse.from_agent_run_response_updates(response_updates).text
+ return AgentResponse.from_updates(response_updates).text
agent_tool: FunctionTool[BaseModel, str] = FunctionTool(
name=tool_name,
@@ -755,7 +756,7 @@ def _update_agent_name_and_description(self) -> None:
@overload
async def run(
self,
- messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None,
+ messages: str | Content | ChatMessage | Sequence[str | Content | ChatMessage] | None = None,
*,
thread: AgentThread | None = None,
tools: ToolProtocol
@@ -770,7 +771,7 @@ async def run(
@overload
async def run(
self,
- messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None,
+ messages: str | Content | ChatMessage | Sequence[str | Content | ChatMessage] | None = None,
*,
thread: AgentThread | None = None,
tools: ToolProtocol
@@ -784,7 +785,7 @@ async def run(
async def run(
self,
- messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None,
+ messages: str | Content | ChatMessage | Sequence[str | Content | ChatMessage] | None = None,
*,
thread: AgentThread | None = None,
tools: ToolProtocol
@@ -927,7 +928,7 @@ async def run(
async def run_stream(
self,
- messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None,
+ messages: str | Content | ChatMessage | Sequence[str | Content | ChatMessage] | None = None,
*,
thread: AgentThread | None = None,
tools: ToolProtocol
@@ -1043,9 +1044,7 @@ async def run_stream(
raw_representation=update,
)
- response = ChatResponse.from_chat_response_updates(
- response_updates, output_format_type=co.get("response_format")
- )
+ response = ChatResponse.from_updates(response_updates, output_format_type=co.get("response_format"))
await self._update_thread_with_type_and_conversation_id(thread, response.conversation_id)
await self._notify_thread_of_new_messages(
diff --git a/python/packages/core/agent_framework/_clients.py b/python/packages/core/agent_framework/_clients.py
index 68d9d0312f..60fe7698ea 100644
--- a/python/packages/core/agent_framework/_clients.py
+++ b/python/packages/core/agent_framework/_clients.py
@@ -45,6 +45,7 @@
ChatMessage,
ChatResponse,
ChatResponseUpdate,
+ Content,
prepare_messages,
validate_chat_options,
)
@@ -129,7 +130,7 @@ async def _stream():
@overload
async def get_response(
self,
- messages: str | ChatMessage | Sequence[str | ChatMessage],
+ messages: str | Content | ChatMessage | Sequence[str | Content | ChatMessage],
*,
options: "ChatOptions[TResponseModelT]",
**kwargs: Any,
@@ -138,7 +139,7 @@ async def get_response(
@overload
async def get_response(
self,
- messages: str | ChatMessage | Sequence[str | ChatMessage],
+ messages: str | Content | ChatMessage | Sequence[str | Content | ChatMessage],
*,
options: TOptions_contra | None = None,
**kwargs: Any,
@@ -160,7 +161,7 @@ async def get_response(
def get_streaming_response(
self,
- messages: str | ChatMessage | Sequence[str | ChatMessage],
+ messages: str | Content | ChatMessage | Sequence[str | Content | ChatMessage],
*,
options: TOptions_contra | None = None,
**kwargs: Any,
@@ -219,9 +220,7 @@ class BaseChatClient(SerializationMixin, ABC, Generic[TOptions_co]):
class CustomChatClient(BaseChatClient):
async def _inner_get_response(self, *, messages, options, **kwargs):
# Your custom implementation
- return ChatResponse(
- messages=[ChatMessage(role="assistant", text="Hello!")], response_id="custom-response"
- )
+ return ChatResponse(messages=[ChatMessage("assistant", ["Hello!"])], response_id="custom-response")
async def _inner_get_streaming_response(self, *, messages, options, **kwargs):
# Your custom streaming implementation
@@ -341,7 +340,7 @@ async def _inner_get_streaming_response(
@overload
async def get_response(
self,
- messages: str | ChatMessage | Sequence[str | ChatMessage],
+ messages: str | Content | ChatMessage | Sequence[str | Content | ChatMessage],
*,
options: "ChatOptions[TResponseModelT]",
**kwargs: Any,
@@ -350,7 +349,7 @@ async def get_response(
@overload
async def get_response(
self,
- messages: str | ChatMessage | Sequence[str | ChatMessage],
+ messages: str | Content | ChatMessage | Sequence[str | Content | ChatMessage],
*,
options: TOptions_co | None = None,
**kwargs: Any,
@@ -358,7 +357,7 @@ async def get_response(
async def get_response(
self,
- messages: str | ChatMessage | Sequence[str | ChatMessage],
+ messages: str | Content | ChatMessage | Sequence[str | Content | ChatMessage],
*,
options: TOptions_co | "ChatOptions[Any]" | None = None,
**kwargs: Any,
@@ -381,7 +380,7 @@ async def get_response(
async def get_streaming_response(
self,
- messages: str | ChatMessage | Sequence[str | ChatMessage],
+ messages: str | Content | ChatMessage | Sequence[str | Content | ChatMessage],
*,
options: TOptions_co | None = None,
**kwargs: Any,
diff --git a/python/packages/core/agent_framework/_mcp.py b/python/packages/core/agent_framework/_mcp.py
index 9410a6698b..578fb606e1 100644
--- a/python/packages/core/agent_framework/_mcp.py
+++ b/python/packages/core/agent_framework/_mcp.py
@@ -32,7 +32,6 @@
from ._types import (
ChatMessage,
Content,
- Role,
)
from .exceptions import ToolException, ToolExecutionException
@@ -71,7 +70,7 @@ def _parse_message_from_mcp(
) -> ChatMessage:
"""Parse an MCP container type into an Agent Framework type."""
return ChatMessage(
- role=Role(value=mcp_type.role),
+ role=mcp_type.role,
contents=_parse_content_from_mcp(mcp_type.content),
raw_representation=mcp_type,
)
diff --git a/python/packages/core/agent_framework/_middleware.py b/python/packages/core/agent_framework/_middleware.py
index c41c2e7b5b..4cd136a230 100644
--- a/python/packages/core/agent_framework/_middleware.py
+++ b/python/packages/core/agent_framework/_middleware.py
@@ -429,7 +429,7 @@ async def process(self, context: ChatContext, next):
# Add system prompt to messages
from agent_framework import ChatMessage
- context.messages.insert(0, ChatMessage(role="system", content=self.system_prompt))
+ context.messages.insert(0, ChatMessage("system", [self.system_prompt]))
# Continue execution
await next(context)
diff --git a/python/packages/core/agent_framework/_serialization.py b/python/packages/core/agent_framework/_serialization.py
index e4866c12d6..01161435ec 100644
--- a/python/packages/core/agent_framework/_serialization.py
+++ b/python/packages/core/agent_framework/_serialization.py
@@ -38,7 +38,7 @@ class SerializationProtocol(Protocol):
# ChatMessage implements SerializationProtocol via SerializationMixin
- user_msg = ChatMessage(role="user", text="What's the weather like today?")
+ user_msg = ChatMessage("user", ["What's the weather like today?"])
# Serialize to dictionary - automatic type identification and nested serialization
msg_dict = user_msg.to_dict()
@@ -53,7 +53,7 @@ class SerializationProtocol(Protocol):
# Deserialize back to ChatMessage instance - automatic type reconstruction
restored_msg = ChatMessage.from_dict(msg_dict)
print(restored_msg.text) # "What's the weather like today?"
- print(restored_msg.role.value) # "user"
+ print(restored_msg.role) # "user"
# Verify protocol compliance (useful for type checking and validation)
assert isinstance(user_msg, SerializationProtocol)
@@ -175,8 +175,8 @@ class SerializationMixin:
# ChatMessageStoreState handles nested ChatMessage serialization
store_state = ChatMessageStoreState(
messages=[
- ChatMessage(role="user", text="Hello agent"),
- ChatMessage(role="assistant", text="Hi! How can I help?"),
+ ChatMessage("user", ["Hello agent"]),
+ ChatMessage("assistant", ["Hi! How can I help?"]),
]
)
diff --git a/python/packages/core/agent_framework/_threads.py b/python/packages/core/agent_framework/_threads.py
index e44c362324..a9d53c9890 100644
--- a/python/packages/core/agent_framework/_threads.py
+++ b/python/packages/core/agent_framework/_threads.py
@@ -202,7 +202,7 @@ class ChatMessageStore:
store = ChatMessageStore()
# Add messages
- message = ChatMessage(role="user", content="Hello")
+ message = ChatMessage("user", ["Hello"])
await store.add_messages([message])
# Retrieve messages
diff --git a/python/packages/core/agent_framework/_tools.py b/python/packages/core/agent_framework/_tools.py
index d88fa4b54c..56594ecec2 100644
--- a/python/packages/core/agent_framework/_tools.py
+++ b/python/packages/core/agent_framework/_tools.py
@@ -1831,7 +1831,6 @@ def _replace_approval_contents_with_results(
"""Replace approval request/response contents with function call/result contents in-place."""
from ._types import (
Content,
- Role,
)
result_idx = 0
@@ -1861,7 +1860,7 @@ def _replace_approval_contents_with_results(
if result_idx < len(approved_function_results):
msg.contents[content_idx] = approved_function_results[result_idx]
result_idx += 1
- msg.role = Role.TOOL
+ msg.role = "tool"
else:
# Create a "not approved" result for rejected calls
# Use function_call.call_id (the function's ID), not content.id (approval's ID)
@@ -1869,7 +1868,7 @@ def _replace_approval_contents_with_results(
call_id=content.function_call.call_id, # type: ignore[union-attr, arg-type]
result="Error: Tool call invocation was rejected by user.",
)
- msg.role = Role.TOOL
+ msg.role = "tool"
# Remove approval requests that were duplicates (in reverse order to preserve indices)
for idx in reversed(contents_to_remove):
@@ -1988,13 +1987,12 @@ async def function_invocation_wrapper(
if any(fccr.type == "function_approval_request" for fccr in function_call_results):
# Add approval requests to the existing assistant message (with tool_calls)
# instead of creating a separate tool message
- from ._types import Role
- if response.messages and response.messages[0].role == Role.ASSISTANT:
+ if response.messages and response.messages[0].role == "assistant":
response.messages[0].contents.extend(function_call_results)
else:
# Fallback: create new assistant message (shouldn't normally happen)
- result_message = ChatMessage(role="assistant", contents=function_call_results)
+ result_message = ChatMessage("assistant", function_call_results)
response.messages.append(result_message)
return response
if any(fccr.type == "function_call" for fccr in function_call_results):
@@ -2005,7 +2003,7 @@ async def function_invocation_wrapper(
# This allows middleware to short-circuit the tool loop without another LLM call
if should_terminate:
# Add tool results to response and return immediately without calling LLM again
- result_message = ChatMessage(role="tool", contents=function_call_results)
+ result_message = ChatMessage("tool", function_call_results)
response.messages.append(result_message)
if fcc_messages:
for msg in reversed(fcc_messages):
@@ -2026,7 +2024,7 @@ async def function_invocation_wrapper(
errors_in_a_row = 0
# add a single ChatMessage to the response with the results
- result_message = ChatMessage(role="tool", contents=function_call_results)
+ result_message = ChatMessage("tool", function_call_results)
response.messages.append(result_message)
# response should contain 2 messages after this,
# one with function call contents
@@ -2162,7 +2160,7 @@ async def streaming_function_invocation_wrapper(
# Depending on the prompt, the message may contain both function call
# content and others
- response: "ChatResponse" = ChatResponse.from_chat_response_updates(all_updates)
+ response: "ChatResponse" = ChatResponse.from_updates(all_updates)
# get the function calls (excluding ones that already have results)
function_results = {it.call_id for it in response.messages[0].contents if it.type == "function_result"}
function_calls = [
@@ -2206,15 +2204,14 @@ async def streaming_function_invocation_wrapper(
if any(fccr.type == "function_approval_request" for fccr in function_call_results):
# Add approval requests to the existing assistant message (with tool_calls)
# instead of creating a separate tool message
- from ._types import Role
- if response.messages and response.messages[0].role == Role.ASSISTANT:
+ if response.messages and response.messages[0].role == "assistant":
response.messages[0].contents.extend(function_call_results)
# Yield the approval requests as part of the assistant message
yield ChatResponseUpdate(contents=function_call_results, role="assistant")
else:
# Fallback: create new assistant message (shouldn't normally happen)
- result_message = ChatMessage(role="assistant", contents=function_call_results)
+ result_message = ChatMessage("assistant", function_call_results)
yield ChatResponseUpdate(contents=function_call_results, role="assistant")
response.messages.append(result_message)
return
@@ -2243,7 +2240,7 @@ async def streaming_function_invocation_wrapper(
errors_in_a_row = 0
# add a single ChatMessage to the response with the results
- result_message = ChatMessage(role="tool", contents=function_call_results)
+ result_message = ChatMessage("tool", function_call_results)
yield ChatResponseUpdate(contents=function_call_results, role="tool")
response.messages.append(result_message)
# response should contain 2 messages after this,
diff --git a/python/packages/core/agent_framework/_types.py b/python/packages/core/agent_framework/_types.py
index 9c49d25845..826394b11c 100644
--- a/python/packages/core/agent_framework/_types.py
+++ b/python/packages/core/agent_framework/_types.py
@@ -8,13 +8,12 @@
Callable,
Mapping,
MutableMapping,
- MutableSequence,
Sequence,
)
from copy import deepcopy
-from typing import TYPE_CHECKING, Any, ClassVar, Final, Generic, Literal, cast, overload
+from typing import TYPE_CHECKING, Any, ClassVar, Final, Generic, Literal, NewType, cast, overload
-from pydantic import BaseModel, ValidationError
+from pydantic import BaseModel
from ._logging import get_logger
from ._serialization import SerializationMixin
@@ -40,7 +39,9 @@
"ChatResponseUpdate",
"Content",
"FinishReason",
+ "FinishReasonLiteral",
"Role",
+ "RoleLiteral",
"TextSpanRegion",
"ToolMode",
"UsageDetails",
@@ -62,42 +63,25 @@
# region Content Parsing Utilities
-class EnumLike(type):
- """Generic metaclass for creating enum-like classes with predefined constants.
-
- This metaclass automatically creates class-level constants based on a _constants
- class attribute. Each constant is defined as a tuple of (name, *args) where
- name is the constant name and args are the constructor arguments.
- """
-
- def __new__(mcs, name: str, bases: tuple[type, ...], namespace: dict[str, Any]) -> "EnumLike":
- cls = super().__new__(mcs, name, bases, namespace)
-
- # Create constants if _constants is defined
- if (const := getattr(cls, "_constants", None)) and isinstance(const, dict):
- for const_name, const_args in const.items():
- if isinstance(const_args, (list, tuple)):
- setattr(cls, const_name, cls(*const_args))
- else:
- setattr(cls, const_name, cls(const_args))
-
- return cls
-
-
def _parse_content_list(contents_data: Sequence[Any]) -> list["Content"]:
- """Parse a list of content data dictionaries into appropriate Content objects.
+ """Parse a list of content data into appropriate Content objects.
Args:
- contents_data: List of content data (dicts or already constructed objects)
+ contents_data: List of content data (strings, dicts, or already constructed objects)
Returns:
List of Content objects with unknown types logged and ignored
"""
contents: list["Content"] = []
for content_data in contents_data:
+ if content_data is None:
+ continue
if isinstance(content_data, Content):
contents.append(content_data)
continue
+ if isinstance(content_data, str):
+ contents.append(Content.from_text(text=content_data))
+ continue
try:
contents.append(Content.from_dict(content_data))
except ContentError as exc:
@@ -1420,140 +1404,56 @@ def prepare_function_call_results(content: "Content | Any | list[Content | Any]"
# region Chat Response constants
+RoleLiteral = Literal["system", "user", "assistant", "tool"]
+"""Literal type enumerating the known role values; use plain ``str``/``Role`` for custom roles."""
-class Role(SerializationMixin, metaclass=EnumLike):
- """Describes the intended purpose of a message within a chat interaction.
-
- Attributes:
- value: The string representation of the role.
-
- Properties:
- SYSTEM: The role that instructs or sets the behavior of the AI system.
- USER: The role that provides user input for chat interactions.
- ASSISTANT: The role that provides responses to system-instructed, user-prompted input.
- TOOL: The role that provides additional information and references in response to tool use requests.
-
- Examples:
- .. code-block:: python
-
- from agent_framework import Role
-
- # Use predefined role constants
- system_role = Role.SYSTEM
- user_role = Role.USER
- assistant_role = Role.ASSISTANT
- tool_role = Role.TOOL
-
- # Create custom role
- custom_role = Role(value="custom")
-
- # Compare roles
- print(system_role == Role.SYSTEM) # True
- print(system_role.value) # "system"
- """
-
- # Constants configuration for EnumLike metaclass
- _constants: ClassVar[dict[str, str]] = {
- "SYSTEM": "system",
- "USER": "user",
- "ASSISTANT": "assistant",
- "TOOL": "tool",
- }
-
- # Type annotations for constants
- SYSTEM: "Role"
- USER: "Role"
- ASSISTANT: "Role"
- TOOL: "Role"
-
- def __init__(self, value: str) -> None:
- """Initialize Role with a value.
-
- Args:
- value: The string representation of the role.
- """
- self.value = value
-
- def __str__(self) -> str:
- """Returns the string representation of the role."""
- return self.value
-
- def __repr__(self) -> str:
- """Returns the string representation of the role."""
- return f"Role(value={self.value!r})"
-
- def __eq__(self, other: object) -> bool:
- """Check if two Role instances are equal."""
- if not isinstance(other, Role):
- return False
- return self.value == other.value
-
- def __hash__(self) -> int:
- """Return hash of the Role for use in sets and dicts."""
- return hash(self.value)
-
+Role = NewType("Role", str)
+"""Type for chat message roles. Use string values directly (e.g., "user", "assistant").
-class FinishReason(SerializationMixin, metaclass=EnumLike):
- """Represents the reason a chat response completed.
+Known values: "system", "user", "assistant", "tool"
- Attributes:
- value: The string representation of the finish reason.
+Examples:
+ .. code-block:: python
- Examples:
- .. code-block:: python
+ from agent_framework import ChatMessage
- from agent_framework import FinishReason
+ # Use string values directly
+ user_msg = ChatMessage("user", ["Hello"])
+ assistant_msg = ChatMessage("assistant", ["Hi there!"])
- # Use predefined finish reason constants
- stop_reason = FinishReason.STOP # Normal completion
- length_reason = FinishReason.LENGTH # Max tokens reached
- tool_calls_reason = FinishReason.TOOL_CALLS # Tool calls triggered
- filter_reason = FinishReason.CONTENT_FILTER # Content filter triggered
+ # Custom roles are also supported
+ custom_msg = ChatMessage("custom", ["Custom role message"])
- # Check finish reason
- if stop_reason == FinishReason.STOP:
- print("Response completed normally")
- """
+ # Compare roles directly as strings
+ if user_msg.role == "user":
+ print("This is a user message")
+"""
- # Constants configuration for EnumLike metaclass
- _constants: ClassVar[dict[str, str]] = {
- "CONTENT_FILTER": "content_filter",
- "LENGTH": "length",
- "STOP": "stop",
- "TOOL_CALLS": "tool_calls",
- }
+FinishReasonLiteral = Literal["stop", "length", "tool_calls", "content_filter"]
+"""Literal type enumerating the known finish reasons; arbitrary strings remain valid via ``FinishReason``."""
- # Type annotations for constants
- CONTENT_FILTER: "FinishReason"
- LENGTH: "FinishReason"
- STOP: "FinishReason"
- TOOL_CALLS: "FinishReason"
+FinishReason = NewType("FinishReason", str)
+"""Type for chat response finish reasons. Use string values directly.
- def __init__(self, value: str) -> None:
- """Initialize FinishReason with a value.
-
- Args:
- value: The string representation of the finish reason.
- """
- self.value = value
+Known values:
+ - "stop": Normal completion
+ - "length": Max tokens reached
+ - "tool_calls": Tool calls triggered
+ - "content_filter": Content filter triggered
- def __eq__(self, other: object) -> bool:
- """Check if two FinishReason instances are equal."""
- if not isinstance(other, FinishReason):
- return False
- return self.value == other.value
+Examples:
+ .. code-block:: python
- def __hash__(self) -> int:
- """Return hash of the FinishReason for use in sets and dicts."""
- return hash(self.value)
+ from agent_framework import ChatResponse
- def __str__(self) -> str:
- """Returns the string representation of the finish reason."""
- return self.value
+ response = ChatResponse(messages=[...], finish_reason="stop")
- def __repr__(self) -> str:
- """Returns the string representation of the finish reason."""
- return f"FinishReason(value={self.value!r})"
+ # Check finish reason directly as string
+ if response.finish_reason == "stop":
+ print("Response completed normally")
+ elif response.finish_reason == "tool_calls":
+ print("Tool calls need to be processed")
+"""
# region ChatMessage
@@ -1574,138 +1474,82 @@ class ChatMessage(SerializationMixin):
Examples:
.. code-block:: python
- from agent_framework import ChatMessage, TextContent
+ from agent_framework import ChatMessage, Content
- # Create a message with text
- user_msg = ChatMessage(role="user", text="What's the weather?")
+ # Create a message with text content
+ user_msg = ChatMessage("user", ["What's the weather?"])
print(user_msg.text) # "What's the weather?"
- # Create a message with role string
- system_msg = ChatMessage(role="system", text="You are a helpful assistant.")
+ # Create a system message
+ system_msg = ChatMessage("system", ["You are a helpful assistant."])
- # Create a message with contents
+ # Create a message with mixed content types
assistant_msg = ChatMessage(
- role="assistant",
- contents=[Content.from_text(text="The weather is sunny!")],
+ "assistant",
+ ["The weather is sunny!", Content.from_image_uri("https://...")],
)
print(assistant_msg.text) # "The weather is sunny!"
# Serialization - to_dict and from_dict
msg_dict = user_msg.to_dict()
- # {'type': 'chat_message', 'role': {'type': 'role', 'value': 'user'},
+ # {'type': 'chat_message', 'role': 'user',
# 'contents': [{'type': 'text', 'text': "What's the weather?"}], 'additional_properties': {}}
restored_msg = ChatMessage.from_dict(msg_dict)
print(restored_msg.text) # "What's the weather?"
# Serialization - to_json and from_json
msg_json = user_msg.to_json()
- # '{"type": "chat_message", "role": {"type": "role", "value": "user"}, "contents": [...], ...}'
+ # '{"type": "chat_message", "role": "user", "contents": [...], ...}'
restored_from_json = ChatMessage.from_json(msg_json)
- print(restored_from_json.role.value) # "user"
+ print(restored_from_json.role) # "user"
"""
DEFAULT_EXCLUDE: ClassVar[set[str]] = {"raw_representation"}
- @overload
def __init__(
self,
- role: Role | Literal["system", "user", "assistant", "tool"],
- *,
- text: str,
- author_name: str | None = None,
- message_id: str | None = None,
- additional_properties: MutableMapping[str, Any] | None = None,
- raw_representation: Any | None = None,
- **kwargs: Any,
- ) -> None:
- """Initializes a ChatMessage with a role and text content.
-
- Args:
- role: The role of the author of the message.
-
- Keyword Args:
- text: The text content of the message.
- author_name: Optional name of the author of the message.
- message_id: Optional ID of the chat message.
- additional_properties: Optional additional properties associated with the chat message.
- Additional properties are used within Agent Framework, they are not sent to services.
- raw_representation: Optional raw representation of the chat message.
- **kwargs: Additional keyword arguments.
- """
-
- @overload
- def __init__(
- self,
- role: Role | Literal["system", "user", "assistant", "tool"],
- *,
- contents: "Sequence[Content | Mapping[str, Any]]",
- author_name: str | None = None,
- message_id: str | None = None,
- additional_properties: MutableMapping[str, Any] | None = None,
- raw_representation: Any | None = None,
- **kwargs: Any,
- ) -> None:
- """Initializes a ChatMessage with a role and optional contents.
-
- Args:
- role: The role of the author of the message.
-
- Keyword Args:
- contents: Optional list of BaseContent items to include in the message.
- author_name: Optional name of the author of the message.
- message_id: Optional ID of the chat message.
- additional_properties: Optional additional properties associated with the chat message.
- Additional properties are used within Agent Framework, they are not sent to services.
- raw_representation: Optional raw representation of the chat message.
- **kwargs: Additional keyword arguments.
- """
-
- def __init__(
- self,
- role: Role | Literal["system", "user", "assistant", "tool"] | dict[str, Any],
+ role: RoleLiteral | str,
+ contents: "Sequence[Content | str | Mapping[str, Any]] | None" = None,
*,
text: str | None = None,
- contents: "Sequence[Content | Mapping[str, Any]] | None" = None,
author_name: str | None = None,
message_id: str | None = None,
additional_properties: MutableMapping[str, Any] | None = None,
raw_representation: Any | None = None,
- **kwargs: Any,
) -> None:
"""Initialize ChatMessage.
Args:
- role: The role of the author of the message (Role, string, or dict).
+ role: The role of the author of the message (e.g., "user", "assistant", "system", "tool").
+        contents: A sequence of content items. Can be Content objects, strings (converted
+            via Content.from_text), or dicts (parsed via Content.from_dict). Defaults to empty list.
Keyword Args:
- text: Optional text content of the message.
- contents: Optional list of BaseContent items or dicts to include in the message.
+ text: Deprecated. Text content of the message. Use contents instead.
+ This parameter is kept for backward compatibility with serialization.
author_name: Optional name of the author of the message.
message_id: Optional ID of the chat message.
additional_properties: Optional additional properties associated with the chat message.
Additional properties are used within Agent Framework, they are not sent to services.
raw_representation: Optional raw representation of the chat message.
- kwargs: will be combined with additional_properties if provided.
"""
- # Handle role conversion
- if isinstance(role, dict):
- role = Role.from_dict(role)
- elif isinstance(role, str):
- role = Role(value=role)
+ # Handle role conversion from legacy dict format
+ if isinstance(role, dict) and "value" in role:
+ role = role["value"]
# Handle contents conversion
parsed_contents = [] if contents is None else _parse_content_list(contents)
+ # Handle text for backward compatibility (from serialization)
if text is not None:
parsed_contents.append(Content.from_text(text=text))
- self.role = role
+ self.role: str = role
self.contents = parsed_contents
self.author_name = author_name
self.message_id = message_id
self.additional_properties = additional_properties or {}
- self.additional_properties.update(kwargs or {})
self.raw_representation = raw_representation
@property
@@ -1719,12 +1563,17 @@ def text(self) -> str:
def prepare_messages(
- messages: str | ChatMessage | Sequence[str | ChatMessage], system_instructions: str | Sequence[str] | None = None
+ messages: str | Content | ChatMessage | Sequence[str | Content | ChatMessage],
+ system_instructions: str | Sequence[str] | None = None,
) -> list[ChatMessage]:
"""Convert various message input formats into a list of ChatMessage objects.
Args:
- messages: The input messages in various supported formats.
+ messages: The input messages in various supported formats. Can be:
+ - A string (converted to a user message)
+ - A Content object (wrapped in a user ChatMessage)
+ - A ChatMessage object
+ - A sequence containing any mix of the above
system_instructions: The system instructions. They will be inserted to the start of the messages list.
Returns:
@@ -1733,43 +1582,66 @@ def prepare_messages(
if system_instructions is not None:
if isinstance(system_instructions, str):
system_instructions = [system_instructions]
- system_instruction_messages = [ChatMessage(role="system", text=instr) for instr in system_instructions]
+ system_instruction_messages = [ChatMessage("system", [instr]) for instr in system_instructions]
else:
system_instruction_messages = []
if isinstance(messages, str):
- return [*system_instruction_messages, ChatMessage(role="user", text=messages)]
+ return [*system_instruction_messages, ChatMessage("user", [messages])]
+ if isinstance(messages, Content):
+ return [*system_instruction_messages, ChatMessage("user", [messages])]
if isinstance(messages, ChatMessage):
return [*system_instruction_messages, messages]
return_messages: list[ChatMessage] = system_instruction_messages
for msg in messages:
- if isinstance(msg, str):
- msg = ChatMessage(role="user", text=msg)
+ if isinstance(msg, (str, Content)):
+ msg = ChatMessage("user", [msg])
return_messages.append(msg)
return return_messages
def normalize_messages(
- messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None,
+ messages: str | Content | ChatMessage | Sequence[str | Content | ChatMessage] | None = None,
) -> list[ChatMessage]:
- """Normalize message inputs to a list of ChatMessage objects."""
+ """Normalize message inputs to a list of ChatMessage objects.
+
+ Args:
+ messages: The input messages in various supported formats. Can be:
+ - None (returns empty list)
+ - A string (converted to a user message)
+ - A Content object (wrapped in a user ChatMessage)
+ - A ChatMessage object
+ - A sequence containing any mix of the above
+
+ Returns:
+ A list of ChatMessage objects.
+ """
if messages is None:
return []
if isinstance(messages, str):
- return [ChatMessage(role=Role.USER, text=messages)]
+ return [ChatMessage("user", [messages])]
+
+ if isinstance(messages, Content):
+ return [ChatMessage("user", [messages])]
if isinstance(messages, ChatMessage):
return [messages]
- return [ChatMessage(role=Role.USER, text=msg) if isinstance(msg, str) else msg for msg in messages]
+ result: list[ChatMessage] = []
+ for msg in messages:
+ if isinstance(msg, (str, Content)):
+ result.append(ChatMessage("user", [msg]))
+ else:
+ result.append(msg)
+ return result
def prepend_instructions_to_messages(
messages: list[ChatMessage],
instructions: str | Sequence[str] | None,
- role: Role | Literal["system", "user", "assistant"] = "system",
+ role: RoleLiteral | str = "system",
) -> list[ChatMessage]:
"""Prepend instructions to a list of messages with a specified role.
@@ -1790,7 +1662,7 @@ def prepend_instructions_to_messages(
from agent_framework import prepend_instructions_to_messages, ChatMessage
- messages = [ChatMessage(role="user", text="Hello")]
+ messages = [ChatMessage("user", ["Hello"])]
instructions = "You are a helpful assistant"
# Prepend as system message (default)
@@ -1805,7 +1677,7 @@ def prepend_instructions_to_messages(
if isinstance(instructions, str):
instructions = [instructions]
- instruction_messages = [ChatMessage(role=role, text=instr) for instr in instructions]
+ instruction_messages = [ChatMessage(role, [instr]) for instr in instructions]
return [*instruction_messages, *messages]
@@ -1829,7 +1701,7 @@ def _process_update(
is_new_message = True
if is_new_message:
- message = ChatMessage(role=Role.ASSISTANT, contents=[])
+ message = ChatMessage("assistant", [])
response.messages.append(message)
else:
message = response.messages[-1]
@@ -1937,31 +1809,32 @@ class ChatResponse(SerializationMixin, Generic[TResponseModel]):
additional_properties: Any additional properties associated with the chat response.
raw_representation: The raw representation of the chat response from an underlying implementation.
+ Note:
+ The `author_name` attribute is available on the `ChatMessage` objects inside `messages`,
+ not on the `ChatResponse` itself. Use `response.messages[0].author_name` to access
+ the author name of individual messages.
+
Examples:
.. code-block:: python
from agent_framework import ChatResponse, ChatMessage
- # Create a simple text response
- response = ChatResponse(text="Hello, how can I help you?")
- print(response.text) # "Hello, how can I help you?"
-
# Create a response with messages
- msg = ChatMessage(role="assistant", text="The weather is sunny.")
+ msg = ChatMessage("assistant", ["The weather is sunny."])
response = ChatResponse(
messages=[msg],
finish_reason="stop",
model_id="gpt-4",
)
+ print(response.text) # "The weather is sunny."
# Combine streaming updates
updates = [...] # List of ChatResponseUpdate objects
- response = ChatResponse.from_chat_response_updates(updates)
+ response = ChatResponse.from_updates(updates)
# Serialization - to_dict and from_dict
response_dict = response.to_dict()
- # {'type': 'chat_response', 'messages': [...], 'model_id': 'gpt-4',
- # 'finish_reason': {'type': 'finish_reason', 'value': 'stop'}}
+ # {'type': 'chat_response', 'messages': [...], 'model_id': 'gpt-4', 'finish_reason': 'stop'}
restored_response = ChatResponse.from_dict(response_dict)
print(restored_response.model_id) # "gpt-4"
@@ -1974,154 +1847,66 @@ class ChatResponse(SerializationMixin, Generic[TResponseModel]):
DEFAULT_EXCLUDE: ClassVar[set[str]] = {"raw_representation", "additional_properties"}
- @overload
- def __init__(
- self,
- *,
- messages: ChatMessage | MutableSequence[ChatMessage],
- response_id: str | None = None,
- conversation_id: str | None = None,
- model_id: str | None = None,
- created_at: CreatedAtT | None = None,
- finish_reason: FinishReason | None = None,
- usage_details: UsageDetails | None = None,
- value: TResponseModel | None = None,
- response_format: type[BaseModel] | None = None,
- additional_properties: dict[str, Any] | None = None,
- raw_representation: Any | None = None,
- **kwargs: Any,
- ) -> None:
- """Initializes a ChatResponse with the provided parameters.
-
- Keyword Args:
- messages: A single ChatMessage or a sequence of ChatMessage objects to include in the response.
- response_id: Optional ID of the chat response.
- conversation_id: Optional identifier for the state of the conversation.
- model_id: Optional model ID used in the creation of the chat response.
- created_at: Optional timestamp for the chat response.
- finish_reason: Optional reason for the chat response.
- usage_details: Optional usage details for the chat response.
- value: Optional value of the structured output.
- response_format: Optional response format for the chat response.
- messages: List of ChatMessage objects to include in the response.
- additional_properties: Optional additional properties associated with the chat response.
- raw_representation: Optional raw representation of the chat response from an underlying implementation.
- **kwargs: Any additional keyword arguments.
- """
-
- @overload
def __init__(
self,
*,
- text: Content | str,
+ messages: ChatMessage | Sequence[ChatMessage] | None = None,
response_id: str | None = None,
conversation_id: str | None = None,
model_id: str | None = None,
created_at: CreatedAtT | None = None,
- finish_reason: FinishReason | None = None,
+ finish_reason: FinishReasonLiteral | str | None = None,
usage_details: UsageDetails | None = None,
value: TResponseModel | None = None,
response_format: type[BaseModel] | None = None,
additional_properties: dict[str, Any] | None = None,
raw_representation: Any | None = None,
- **kwargs: Any,
- ) -> None:
- """Initializes a ChatResponse with the provided parameters.
-
- Keyword Args:
- text: The text content to include in the response. If provided, it will be added as a ChatMessage.
- response_id: Optional ID of the chat response.
- conversation_id: Optional identifier for the state of the conversation.
- model_id: Optional model ID used in the creation of the chat response.
- created_at: Optional timestamp for the chat response.
- finish_reason: Optional reason for the chat response.
- usage_details: Optional usage details for the chat response.
- value: Optional value of the structured output.
- response_format: Optional response format for the chat response.
- additional_properties: Optional additional properties associated with the chat response.
- raw_representation: Optional raw representation of the chat response from an underlying implementation.
- **kwargs: Any additional keyword arguments.
-
- """
-
- def __init__(
- self,
- *,
- messages: ChatMessage | MutableSequence[ChatMessage] | list[dict[str, Any]] | None = None,
- text: Content | str | None = None,
- response_id: str | None = None,
- conversation_id: str | None = None,
- model_id: str | None = None,
- created_at: CreatedAtT | None = None,
- finish_reason: FinishReason | dict[str, Any] | None = None,
- usage_details: UsageDetails | dict[str, Any] | None = None,
- value: TResponseModel | None = None,
- response_format: type[BaseModel] | None = None,
- additional_properties: dict[str, Any] | None = None,
- raw_representation: Any | None = None,
- **kwargs: Any,
) -> None:
"""Initializes a ChatResponse with the provided parameters.
Keyword Args:
- messages: A single ChatMessage or a sequence of ChatMessage objects to include in the response.
- text: The text content to include in the response. If provided, it will be added as a ChatMessage.
+ messages: A single ChatMessage or sequence of ChatMessage objects to include in the response.
response_id: Optional ID of the chat response.
conversation_id: Optional identifier for the state of the conversation.
model_id: Optional model ID used in the creation of the chat response.
created_at: Optional timestamp for the chat response.
- finish_reason: Optional reason for the chat response.
+ finish_reason: Optional reason for the chat response (e.g., "stop", "length", "tool_calls").
usage_details: Optional usage details for the chat response.
value: Optional value of the structured output.
response_format: Optional response format for the chat response.
additional_properties: Optional additional properties associated with the chat response.
raw_representation: Optional raw representation of the chat response from an underlying implementation.
- **kwargs: Any additional keyword arguments.
"""
- # Handle messages conversion
if messages is None:
- messages = []
- elif not isinstance(messages, MutableSequence):
- messages = [messages]
+ self.messages: list[ChatMessage] = []
+ elif isinstance(messages, ChatMessage):
+ self.messages = [messages]
else:
- # Convert any dicts in messages list to ChatMessage objects
- converted_messages: list[ChatMessage] = []
+ # Handle both ChatMessage objects and dicts (for from_dict support)
+ processed_messages: list[ChatMessage] = []
for msg in messages:
- if isinstance(msg, dict):
- converted_messages.append(ChatMessage.from_dict(msg))
+ if isinstance(msg, ChatMessage):
+ processed_messages.append(msg)
+ elif isinstance(msg, dict):
+ processed_messages.append(ChatMessage.from_dict(msg))
else:
- converted_messages.append(msg)
- messages = converted_messages
-
- if text is not None:
- if isinstance(text, str):
- text = Content.from_text(text=text)
- messages.append(ChatMessage(role=Role.ASSISTANT, contents=[text]))
-
- # Handle finish_reason conversion
- if isinstance(finish_reason, dict):
- finish_reason = FinishReason.from_dict(finish_reason)
-
- # Handle usage_details - UsageDetails is now a TypedDict, so dict is already the right type
- # No conversion needed
-
- self.messages = list(messages)
+ processed_messages.append(msg)
+ self.messages = processed_messages
self.response_id = response_id
self.conversation_id = conversation_id
self.model_id = model_id
self.created_at = created_at
- self.finish_reason = finish_reason
+ self.finish_reason: str | None = finish_reason
self.usage_details = usage_details
self._value: TResponseModel | None = value
self._response_format: type[BaseModel] | None = response_format
self._value_parsed: bool = value is not None
self.additional_properties = additional_properties or {}
- self.additional_properties.update(kwargs or {})
self.raw_representation: Any | list[Any] | None = raw_representation
@overload
@classmethod
- def from_chat_response_updates(
+ def from_updates(
cls: type["ChatResponse[Any]"],
updates: Sequence["ChatResponseUpdate"],
*,
@@ -2130,7 +1915,7 @@ def from_chat_response_updates(
@overload
@classmethod
- def from_chat_response_updates(
+ def from_updates(
cls: type["ChatResponse[Any]"],
updates: Sequence["ChatResponseUpdate"],
*,
@@ -2138,7 +1923,7 @@ def from_chat_response_updates(
) -> "ChatResponse[Any]": ...
@classmethod
- def from_chat_response_updates(
+ def from_updates(
cls: type[TChatResponse],
updates: Sequence["ChatResponseUpdate"],
*,
@@ -2153,12 +1938,12 @@ def from_chat_response_updates(
# Create some response updates
updates = [
- ChatResponseUpdate(role="assistant", text="Hello"),
- ChatResponseUpdate(text=" How can I help you?"),
+ ChatResponseUpdate(contents=[Content.from_text(text="Hello")], role="assistant"),
+ ChatResponseUpdate(contents=[Content.from_text(text=" How can I help you?")]),
]
# Combine updates into a single ChatResponse
- response = ChatResponse.from_chat_response_updates(updates)
+ response = ChatResponse.from_updates(updates)
print(response.text) # "Hello How can I help you?"
Args:
@@ -2167,17 +1952,16 @@ def from_chat_response_updates(
Keyword Args:
output_format_type: Optional Pydantic model type to parse the response text into structured data.
"""
- msg = cls(messages=[])
+ response_format = output_format_type if isinstance(output_format_type, type) else None
+ msg = cls(messages=[], response_format=response_format)
for update in updates:
_process_update(msg, update)
_finalize_response(msg)
- if output_format_type:
- msg.try_parse_value(output_format_type)
return msg
@overload
@classmethod
- async def from_chat_response_generator(
+ async def from_update_generator(
cls: type["ChatResponse[Any]"],
updates: AsyncIterable["ChatResponseUpdate"],
*,
@@ -2186,7 +1970,7 @@ async def from_chat_response_generator(
@overload
@classmethod
- async def from_chat_response_generator(
+ async def from_update_generator(
cls: type["ChatResponse[Any]"],
updates: AsyncIterable["ChatResponseUpdate"],
*,
@@ -2194,7 +1978,7 @@ async def from_chat_response_generator(
) -> "ChatResponse[Any]": ...
@classmethod
- async def from_chat_response_generator(
+ async def from_update_generator(
cls: type[TChatResponse],
updates: AsyncIterable["ChatResponseUpdate"],
*,
@@ -2208,7 +1992,7 @@ async def from_chat_response_generator(
from agent_framework import ChatResponse, ChatResponseUpdate, ChatClient
client = ChatClient() # should be a concrete implementation
- response = await ChatResponse.from_chat_response_generator(
+ response = await ChatResponse.from_update_generator(
client.get_streaming_response("Hello, how are you?")
)
print(response.text)
@@ -2224,8 +2008,6 @@ async def from_chat_response_generator(
async for update in updates:
_process_update(msg, update)
_finalize_response(msg)
- if response_format and issubclass(response_format, BaseModel):
- msg.try_parse_value(response_format)
return msg
@property
@@ -2257,47 +2039,6 @@ def value(self) -> TResponseModel | None:
def __str__(self) -> str:
return self.text
- @overload
- def try_parse_value(self, output_format_type: type[TResponseModelT]) -> TResponseModelT | None: ...
-
- @overload
- def try_parse_value(self, output_format_type: None = None) -> TResponseModel | None: ...
-
- def try_parse_value(self, output_format_type: type[BaseModel] | None = None) -> BaseModel | None:
- """Try to parse the text into a typed value.
-
- This is the safe alternative to accessing the value property directly.
- Returns the parsed value on success, or None on failure.
-
- Args:
- output_format_type: The Pydantic model type to parse into.
- If None, uses the response_format from initialization.
-
- Returns:
- The parsed value as the specified type, or None if parsing fails.
- """
- format_type = output_format_type or self._response_format
- if format_type is None or not (isinstance(format_type, type) and issubclass(format_type, BaseModel)):
- return None
-
- # Cache the result unless a different schema than the configured response_format is requested.
- # This prevents calls with a different schema from polluting the cached value.
- use_cache = (
- self._response_format is None or output_format_type is None or output_format_type is self._response_format
- )
-
- if use_cache and self._value_parsed and self._value is not None:
- return self._value # type: ignore[return-value, no-any-return]
- try:
- parsed_value = format_type.model_validate_json(self.text) # type: ignore[reportUnknownMemberType]
- if use_cache:
- self._value = cast(TResponseModel, parsed_value)
- self._value_parsed = True
- return parsed_value # type: ignore[return-value]
- except ValidationError as ex:
- logger.warning("Failed to parse value from chat response text: %s", ex)
- return None
-
# region ChatResponseUpdate
@@ -2308,7 +2049,10 @@ class ChatResponseUpdate(SerializationMixin):
Attributes:
contents: The chat response update content items.
role: The role of the author of the response update.
- author_name: The name of the author of the response update.
+ author_name: The name of the author of the response update. This is primarily used in
+ multi-agent scenarios to identify which agent or participant generated the response.
+ When updates are combined into a `ChatResponse`, the `author_name` is propagated
+ to the resulting `ChatMessage` objects.
response_id: The ID of the response of which this update is a part.
message_id: The ID of the message of which this update is a part.
conversation_id: An identifier for the state of the conversation of which this update is a part.
@@ -2321,9 +2065,9 @@ class ChatResponseUpdate(SerializationMixin):
Examples:
.. code-block:: python
- from agent_framework import ChatResponseUpdate, TextContent
+ from agent_framework import ChatResponseUpdate, Content
- # Create a response update
+ # Create a response update with text content
update = ChatResponseUpdate(
contents=[Content.from_text(text="Hello")],
role="assistant",
@@ -2331,13 +2075,10 @@ class ChatResponseUpdate(SerializationMixin):
)
print(update.text) # "Hello"
- # Create update with text shorthand
- update = ChatResponseUpdate(text="World!", role="assistant")
-
# Serialization - to_dict and from_dict
update_dict = update.to_dict()
# {'type': 'chat_response_update', 'contents': [{'type': 'text', 'text': 'Hello'}],
- # 'role': {'type': 'role', 'value': 'assistant'}, 'message_id': 'msg_123'}
+ # 'role': 'assistant', 'message_id': 'msg_123'}
restored_update = ChatResponseUpdate.from_dict(update_dict)
print(restored_update.text) # "Hello"
@@ -2354,26 +2095,23 @@ class ChatResponseUpdate(SerializationMixin):
def __init__(
self,
*,
- contents: Sequence[Content | dict[str, Any]] | None = None,
- text: Content | str | None = None,
- role: Role | Literal["system", "user", "assistant", "tool"] | dict[str, Any] | None = None,
+ contents: Sequence[Content] | None = None,
+ role: RoleLiteral | str | None = None,
author_name: str | None = None,
response_id: str | None = None,
message_id: str | None = None,
conversation_id: str | None = None,
model_id: str | None = None,
created_at: CreatedAtT | None = None,
- finish_reason: FinishReason | dict[str, Any] | None = None,
+ finish_reason: FinishReasonLiteral | str | None = None,
additional_properties: dict[str, Any] | None = None,
raw_representation: Any | None = None,
- **kwargs: Any,
) -> None:
"""Initializes a ChatResponseUpdate with the provided parameters.
Keyword Args:
- contents: Optional list of BaseContent items or dicts to include in the update.
- text: Optional text content to include in the update.
- role: Optional role of the author of the response update (Role, string, or dict
+            contents: Optional list of Content items (dicts are also accepted and parsed via Content.from_dict).
+ role: Optional role of the author of the response update (e.g., "user", "assistant").
author_name: Optional name of the author of the response update.
response_id: Optional ID of the response of which this update is a part.
message_id: Optional ID of the message of which this update is a part.
@@ -2384,36 +2122,36 @@ def __init__(
additional_properties: Optional additional properties associated with the chat response update.
raw_representation: Optional raw representation of the chat response update
from an underlying implementation.
- **kwargs: Any additional keyword arguments.
"""
- # Handle contents conversion
- contents = [] if contents is None else _parse_content_list(contents)
+ # Handle contents - support dict conversion for from_dict
+ if contents is None:
+ self.contents: list[Content] = []
+ else:
+ processed_contents: list[Content] = []
+ for c in contents:
+ if isinstance(c, Content):
+ processed_contents.append(c)
+ elif isinstance(c, dict):
+ processed_contents.append(Content.from_dict(c))
+ else:
+ processed_contents.append(c)
+ self.contents = processed_contents
- if text is not None:
- if isinstance(text, str):
- text = Content.from_text(text=text)
- contents.append(text)
-
- # Handle role conversion
- if isinstance(role, dict):
- role = Role.from_dict(role)
- elif isinstance(role, str):
- role = Role(value=role)
-
- # Handle finish_reason conversion
- if isinstance(finish_reason, dict):
- finish_reason = FinishReason.from_dict(finish_reason)
-
- self.contents = list(contents)
- self.role = role
+ # Handle legacy dict formats for role and finish_reason
+ if isinstance(role, dict) and "value" in role:
+ role = role["value"]
+ if isinstance(finish_reason, dict) and "value" in finish_reason:
+ finish_reason = finish_reason["value"]
+
+ self.role: str | None = role
self.author_name = author_name
self.response_id = response_id
self.message_id = message_id
self.conversation_id = conversation_id
self.model_id = model_id
self.created_at = created_at
- self.finish_reason = finish_reason
+ self.finish_reason: str | None = finish_reason
self.additional_properties = additional_properties
self.raw_representation = raw_representation
@@ -2436,13 +2174,18 @@ class AgentResponse(SerializationMixin, Generic[TResponseModel]):
A typical response will contain a single message, but may contain multiple
messages in scenarios involving function calls, RAG retrievals, or complex logic.
+ Note:
+ The `author_name` attribute is available on the `ChatMessage` objects inside `messages`,
+ not on the `AgentResponse` itself. Use `response.messages[0].author_name` to access
+ the author name of individual messages.
+
Examples:
.. code-block:: python
from agent_framework import AgentResponse, ChatMessage
# Create agent response
- msg = ChatMessage(role="assistant", text="Task completed successfully.")
+ msg = ChatMessage("assistant", ["Task completed successfully."])
response = AgentResponse(messages=[msg], response_id="run_123")
print(response.text) # "Task completed successfully."
@@ -2452,7 +2195,7 @@ class AgentResponse(SerializationMixin, Generic[TResponseModel]):
# Combine streaming updates
updates = [...] # List of AgentResponseUpdate objects
- response = AgentResponse.from_agent_run_response_updates(updates)
+ response = AgentResponse.from_updates(updates)
# Serialization - to_dict and from_dict
response_dict = response.to_dict()
@@ -2473,60 +2216,53 @@ class AgentResponse(SerializationMixin, Generic[TResponseModel]):
def __init__(
self,
*,
- messages: ChatMessage
- | list[ChatMessage]
- | MutableMapping[str, Any]
- | list[MutableMapping[str, Any]]
- | None = None,
+ messages: ChatMessage | Sequence[ChatMessage] | None = None,
response_id: str | None = None,
+ agent_id: str | None = None,
created_at: CreatedAtT | None = None,
- usage_details: UsageDetails | MutableMapping[str, Any] | None = None,
+ usage_details: UsageDetails | None = None,
value: TResponseModel | None = None,
response_format: type[BaseModel] | None = None,
raw_representation: Any | None = None,
additional_properties: dict[str, Any] | None = None,
- **kwargs: Any,
) -> None:
"""Initialize an AgentResponse.
Keyword Args:
- messages: The list of chat messages in the response.
+ messages: A single ChatMessage or sequence of ChatMessage objects to include in the response.
response_id: The ID of the chat response.
+ agent_id: The identifier of the agent that produced this response. Useful in multi-agent
+ scenarios to track which agent generated the response.
created_at: A timestamp for the chat response.
usage_details: The usage details for the chat response.
value: The structured output of the agent run response, if applicable.
response_format: Optional response format for the agent response.
additional_properties: Any additional properties associated with the chat response.
raw_representation: The raw representation of the chat response from an underlying implementation.
- **kwargs: Additional properties to set on the response.
"""
- processed_messages: list[ChatMessage] = []
- if messages is not None:
- if isinstance(messages, ChatMessage):
- processed_messages.append(messages)
- elif isinstance(messages, list):
- for message_data in messages:
- if isinstance(message_data, ChatMessage):
- processed_messages.append(message_data)
- elif isinstance(message_data, MutableMapping):
- processed_messages.append(ChatMessage.from_dict(message_data))
- else:
- logger.warning(f"Unknown message content: {message_data}")
- elif isinstance(messages, MutableMapping):
- processed_messages.append(ChatMessage.from_dict(messages))
-
- # Convert usage_details from dict if needed (for SerializationMixin support)
- # UsageDetails is now a TypedDict, so dict is already the right type
-
- self.messages = processed_messages
+ if messages is None:
+ self.messages: list[ChatMessage] = []
+ elif isinstance(messages, ChatMessage):
+ self.messages = [messages]
+ else:
+ # Handle both ChatMessage objects and dicts (for from_dict support)
+ processed_messages: list[ChatMessage] = []
+ for msg in messages:
+ if isinstance(msg, ChatMessage):
+ processed_messages.append(msg)
+ elif isinstance(msg, dict):
+ processed_messages.append(ChatMessage.from_dict(msg))
+ else:
+ processed_messages.append(msg)
+ self.messages = processed_messages
self.response_id = response_id
+ self.agent_id = agent_id
self.created_at = created_at
self.usage_details = usage_details
self._value: TResponseModel | None = value
self._response_format: type[BaseModel] | None = response_format
self._value_parsed: bool = value is not None
self.additional_properties = additional_properties or {}
- self.additional_properties.update(kwargs or {})
self.raw_representation = raw_representation
@property
@@ -2567,7 +2303,7 @@ def user_input_requests(self) -> list[Content]:
@overload
@classmethod
- def from_agent_run_response_updates(
+ def from_updates(
cls: type["AgentResponse[Any]"],
updates: Sequence["AgentResponseUpdate"],
*,
@@ -2576,7 +2312,7 @@ def from_agent_run_response_updates(
@overload
@classmethod
- def from_agent_run_response_updates(
+ def from_updates(
cls: type["AgentResponse[Any]"],
updates: Sequence["AgentResponseUpdate"],
*,
@@ -2584,7 +2320,7 @@ def from_agent_run_response_updates(
) -> "AgentResponse[Any]": ...
@classmethod
- def from_agent_run_response_updates(
+ def from_updates(
cls: type[TAgentRunResponse],
updates: Sequence["AgentResponseUpdate"],
*,
@@ -2602,8 +2338,6 @@ def from_agent_run_response_updates(
for update in updates:
_process_update(msg, update)
_finalize_response(msg)
- if output_format_type:
- msg.try_parse_value(output_format_type)
return msg
@overload
@@ -2643,54 +2377,11 @@ async def from_agent_response_generator(
async for update in updates:
_process_update(msg, update)
_finalize_response(msg)
- if output_format_type:
- msg.try_parse_value(output_format_type)
return msg
def __str__(self) -> str:
return self.text
- @overload
- def try_parse_value(self, output_format_type: type[TResponseModelT]) -> TResponseModelT | None: ...
-
- @overload
- def try_parse_value(self, output_format_type: None = None) -> TResponseModel | None: ...
-
- def try_parse_value(self, output_format_type: type[BaseModel] | None = None) -> BaseModel | None:
- """Try to parse the text into a typed value.
-
- This is the safe alternative when you need to parse the response text into a typed value.
- Returns the parsed value on success, or None on failure.
-
- Args:
- output_format_type: The Pydantic model type to parse into.
- If None, uses the response_format from initialization.
-
- Returns:
- The parsed value as the specified type, or None if parsing fails.
- """
- format_type = output_format_type or self._response_format
- if format_type is None or not (isinstance(format_type, type) and issubclass(format_type, BaseModel)):
- return None
-
- # Cache the result unless a different schema than the configured response_format is requested.
- # This prevents calls with a different schema from polluting the cached value.
- use_cache = (
- self._response_format is None or output_format_type is None or output_format_type is self._response_format
- )
-
- if use_cache and self._value_parsed and self._value is not None:
- return self._value # type: ignore[return-value, no-any-return]
- try:
- parsed_value = format_type.model_validate_json(self.text) # type: ignore[reportUnknownMemberType]
- if use_cache:
- self._value = cast(TResponseModel, parsed_value)
- self._value_parsed = True
- return parsed_value # type: ignore[return-value]
- except ValidationError as ex:
- logger.warning("Failed to parse value from agent run response text: %s", ex)
- return None
-
# region AgentResponseUpdate
@@ -2698,6 +2389,20 @@ def try_parse_value(self, output_format_type: type[BaseModel] | None = None) ->
class AgentResponseUpdate(SerializationMixin):
"""Represents a single streaming response chunk from an Agent.
+ Attributes:
+ contents: The content items in this update.
+ role: The role of the author of the response update.
+ author_name: The name of the author of the response update. In multi-agent scenarios,
+ this identifies which agent generated this update. When updates are combined into
+ an `AgentResponse`, the `author_name` is propagated to the resulting `ChatMessage` objects.
+ agent_id: The identifier of the agent that produced this update. Useful in multi-agent
+ scenarios to track which agent generated specific parts of the response.
+ response_id: The ID of the response of which this update is a part.
+ message_id: The ID of the message of which this update is a part.
+ created_at: A timestamp for the response update.
+ additional_properties: Any additional properties associated with the update.
+ raw_representation: The raw representation from an underlying implementation.
+
Examples:
.. code-block:: python
@@ -2717,7 +2422,7 @@ class AgentResponseUpdate(SerializationMixin):
# Serialization - to_dict and from_dict
update_dict = update.to_dict()
# {'type': 'agent_response_update', 'contents': [{'type': 'text', 'text': 'Processing...'}],
- # 'role': {'type': 'role', 'value': 'assistant'}, 'response_id': 'run_123'}
+ # 'role': 'assistant', 'response_id': 'run_123'}
restored_update = AgentResponseUpdate.from_dict(update_dict)
print(restored_update.response_id) # "run_123"
@@ -2733,48 +2438,52 @@ class AgentResponseUpdate(SerializationMixin):
def __init__(
self,
*,
- contents: Sequence[Content | MutableMapping[str, Any]] | None = None,
- text: Content | str | None = None,
- role: Role | MutableMapping[str, Any] | str | None = None,
+ contents: Sequence[Content] | None = None,
+ role: RoleLiteral | str | None = None,
author_name: str | None = None,
+ agent_id: str | None = None,
response_id: str | None = None,
message_id: str | None = None,
created_at: CreatedAtT | None = None,
- additional_properties: MutableMapping[str, Any] | None = None,
+ additional_properties: dict[str, Any] | None = None,
raw_representation: Any | None = None,
- **kwargs: Any,
) -> None:
"""Initialize an AgentResponseUpdate.
Keyword Args:
- contents: Optional list of BaseContent items or dicts to include in the update.
- text: Optional text content of the update.
- role: The role of the author of the response update (Role, string, or dict
- author_name: Optional name of the author of the response update.
+ contents: Optional list of Content items to include in the update.
+ role: The role of the author of the response update (e.g., "user", "assistant").
+ author_name: Optional name of the author of the response update. Used in multi-agent
+ scenarios to identify which agent generated this update.
+ agent_id: Optional identifier of the agent that produced this update.
response_id: Optional ID of the response of which this update is a part.
message_id: Optional ID of the message of which this update is a part.
created_at: Optional timestamp for the chat response update.
additional_properties: Optional additional properties associated with the chat response update.
raw_representation: Optional raw representation of the chat response update.
- kwargs: will be combined with additional_properties if provided.
"""
- parsed_contents: list[Content] = [] if contents is None else _parse_content_list(contents)
-
- if text is not None:
- if isinstance(text, str):
- text = Content.from_text(text=text)
- parsed_contents.append(text)
+ # Handle contents - support dict conversion for from_dict
+ if contents is None:
+ self.contents: list[Content] = []
+ else:
+ processed_contents: list[Content] = []
+ for c in contents:
+ if isinstance(c, Content):
+ processed_contents.append(c)
+ elif isinstance(c, dict):
+ processed_contents.append(Content.from_dict(c))
+ else:
+ processed_contents.append(c)
+ self.contents = processed_contents
- # Convert role from dict if needed (for SerializationMixin support)
- if isinstance(role, MutableMapping):
- role = Role.from_dict(role)
- elif isinstance(role, str):
- role = Role(value=role)
+ # Handle legacy dict format for role
+ if isinstance(role, dict) and "value" in role:
+ role = role["value"]
- self.contents = parsed_contents
- self.role = role
+ self.role: str | None = role
self.author_name = author_name
+ self.agent_id = agent_id
self.response_id = response_id
self.message_id = message_id
self.created_at = created_at
diff --git a/python/packages/core/agent_framework/_workflows/_agent.py b/python/packages/core/agent_framework/_workflows/_agent.py
index 1543ed7db6..28482820a0 100644
--- a/python/packages/core/agent_framework/_workflows/_agent.py
+++ b/python/packages/core/agent_framework/_workflows/_agent.py
@@ -16,7 +16,6 @@
BaseAgent,
ChatMessage,
Content,
- Role,
UsageDetails,
)
@@ -344,7 +343,7 @@ def _convert_workflow_event_to_agent_update(
return None
return AgentResponseUpdate(
contents=contents,
- role=Role.ASSISTANT,
+ role="assistant",
author_name=executor_id,
response_id=response_id,
message_id=str(uuid.uuid4()),
@@ -370,7 +369,7 @@ def _convert_workflow_event_to_agent_update(
)
return AgentResponseUpdate(
contents=[function_call, approval_request],
- role=Role.ASSISTANT,
+ role="assistant",
author_name=self.name,
response_id=response_id,
message_id=str(uuid.uuid4()),
@@ -453,7 +452,7 @@ def merge_updates(updates: list[AgentResponseUpdate], response_id: str) -> Agent
- Group updates by response_id; within each response_id, group by message_id and keep a dangling bucket for
updates without message_id.
- Convert each group (per message and dangling) into an intermediate AgentResponse via
- AgentResponse.from_agent_run_response_updates, then sort by created_at and merge.
+ AgentResponse.from_updates, then sort by created_at and merge.
- Append messages from updates without any response_id at the end (global dangling), while aggregating metadata.
Args:
@@ -548,9 +547,9 @@ def _add_raw(value: object) -> None:
per_message_responses: list[AgentResponse] = []
for _, msg_updates in by_msg.items():
if msg_updates:
- per_message_responses.append(AgentResponse.from_agent_run_response_updates(msg_updates))
+ per_message_responses.append(AgentResponse.from_updates(msg_updates))
if dangling:
- per_message_responses.append(AgentResponse.from_agent_run_response_updates(dangling))
+ per_message_responses.append(AgentResponse.from_updates(dangling))
per_message_responses.sort(key=lambda r: _parse_dt(r.created_at))
@@ -584,7 +583,7 @@ def _add_raw(value: object) -> None:
# These are updates that couldn't be associated with any response_id
# (e.g., orphan FunctionResultContent with no matching FunctionCallContent)
if global_dangling:
- flattened = AgentResponse.from_agent_run_response_updates(global_dangling)
+ flattened = AgentResponse.from_updates(global_dangling)
final_messages.extend(flattened.messages)
if flattened.usage_details:
merged_usage = add_usage_details(merged_usage, flattened.usage_details) # type: ignore[arg-type]
diff --git a/python/packages/core/agent_framework/_workflows/_agent_executor.py b/python/packages/core/agent_framework/_workflows/_agent_executor.py
index 80bd4aba43..9849d351d1 100644
--- a/python/packages/core/agent_framework/_workflows/_agent_executor.py
+++ b/python/packages/core/agent_framework/_workflows/_agent_executor.py
@@ -198,7 +198,7 @@ async def handle_user_input_response(
if not self._pending_agent_requests:
# All pending requests have been resolved; resume agent execution
- self._cache = normalize_messages_input(ChatMessage(role="user", contents=self._pending_responses_to_agent))
+ self._cache = normalize_messages_input(ChatMessage("user", self._pending_responses_to_agent))
self._pending_responses_to_agent.clear()
await self._run_agent_and_emit(ctx)
@@ -378,12 +378,12 @@ async def _run_agent_streaming(self, ctx: WorkflowContext) -> AgentResponse | No
# Build the final AgentResponse from the collected updates
if isinstance(self._agent, ChatAgent):
response_format = self._agent.default_options.get("response_format")
- response = AgentResponse.from_agent_run_response_updates(
+ response = AgentResponse.from_updates(
updates,
output_format_type=response_format,
)
else:
- response = AgentResponse.from_agent_run_response_updates(updates)
+ response = AgentResponse.from_updates(updates)
# Handle any user input requests after the streaming completes
if user_input_requests:
diff --git a/python/packages/core/agent_framework/_workflows/_base_group_chat_orchestrator.py b/python/packages/core/agent_framework/_workflows/_base_group_chat_orchestrator.py
index e3cc4bc7d2..4c4d69f7bd 100644
--- a/python/packages/core/agent_framework/_workflows/_base_group_chat_orchestrator.py
+++ b/python/packages/core/agent_framework/_workflows/_base_group_chat_orchestrator.py
@@ -14,7 +14,7 @@
from typing_extensions import Never
-from .._types import ChatMessage, Role
+from .._types import ChatMessage
from ._agent_executor import AgentExecutor, AgentExecutorRequest, AgentExecutorResponse
from ._events import WorkflowEvent
from ._executor import Executor, handler
@@ -214,7 +214,7 @@ async def handle_str(
Usage:
workflow.run("Write a blog post about AI agents")
"""
- await self._handle_messages([ChatMessage(role=Role.USER, text=task)], ctx)
+ await self._handle_messages([ChatMessage("user", [task])], ctx)
@handler
async def handle_message(
@@ -231,7 +231,7 @@ async def handle_message(
ctx: Workflow context
Usage:
- workflow.run(ChatMessage(role=Role.USER, text="Write a blog post about AI agents"))
+ workflow.run(ChatMessage("user", ["Write a blog post about AI agents"]))
"""
await self._handle_messages([task], ctx)
@@ -250,8 +250,8 @@ async def handle_messages(
ctx: Workflow context
Usage:
workflow.run([
- ChatMessage(role=Role.USER, text="Write a blog post about AI agents"),
- ChatMessage(role=Role.USER, text="Make it engaging and informative.")
+ ChatMessage("user", ["Write a blog post about AI agents"]),
+ ChatMessage("user", ["Make it engaging and informative."])
])
"""
if not task:
@@ -401,7 +401,7 @@ def _create_completion_message(self, message: str) -> ChatMessage:
Returns:
ChatMessage with completion content
"""
- return ChatMessage(role=Role.ASSISTANT, text=message, author_name=self._name)
+ return ChatMessage("assistant", [message], author_name=self._name)
# Participant routing (shared across all patterns)
@@ -465,7 +465,7 @@ async def _send_request_to_participant(
# AgentExecutors receive simple message list
messages: list[ChatMessage] = []
if additional_instruction:
- messages.append(ChatMessage(role=Role.USER, text=additional_instruction))
+ messages.append(ChatMessage("user", [additional_instruction]))
request = AgentExecutorRequest(messages=messages, should_respond=True)
await ctx.send_message(request, target_id=target)
await ctx.add_event(
diff --git a/python/packages/core/agent_framework/_workflows/_concurrent.py b/python/packages/core/agent_framework/_workflows/_concurrent.py
index 4204c8cd6d..afa0ef99e7 100644
--- a/python/packages/core/agent_framework/_workflows/_concurrent.py
+++ b/python/packages/core/agent_framework/_workflows/_concurrent.py
@@ -8,7 +8,7 @@
from typing_extensions import Never
-from agent_framework import AgentProtocol, ChatMessage, Role
+from agent_framework import AgentProtocol, ChatMessage
from ._agent_executor import AgentExecutor, AgentExecutorRequest, AgentExecutorResponse
from ._agent_utils import resolve_agent_id
@@ -91,16 +91,13 @@ async def aggregate(
logger.error("Concurrent aggregator received empty results list")
raise ValueError("Aggregation failed: no results provided")
- def _is_role(msg: Any, role: Role) -> bool:
+ def _is_role(msg: Any, role: str) -> bool:
r = getattr(msg, "role", None)
if r is None:
return False
# Normalize both r and role to lowercase strings for comparison
r_str = str(r).lower() if isinstance(r, str) or hasattr(r, "__str__") else r
- role_str = getattr(role, "value", None)
- if role_str is None:
- role_str = str(role)
- role_str = role_str.lower()
+ role_str = str(role).lower()
return r_str == role_str
prompt_message: ChatMessage | None = None
@@ -117,14 +114,14 @@ def _is_role(msg: Any, role: Role) -> bool:
# Capture a single user prompt (first encountered across any conversation)
if prompt_message is None:
- found_user = next((m for m in conv if _is_role(m, Role.USER)), None)
+ found_user = next((m for m in conv if _is_role(m, "user")), None)
if found_user is not None:
prompt_message = found_user
# Pick the final assistant message from the response; fallback to conversation search
- final_assistant = next((m for m in reversed(resp_messages) if _is_role(m, Role.ASSISTANT)), None)
+ final_assistant = next((m for m in reversed(resp_messages) if _is_role(m, "assistant")), None)
if final_assistant is None:
- final_assistant = next((m for m in reversed(conv) if _is_role(m, Role.ASSISTANT)), None)
+ final_assistant = next((m for m in reversed(conv) if _is_role(m, "assistant")), None)
if final_assistant is not None:
assistant_replies.append(final_assistant)
diff --git a/python/packages/core/agent_framework/_workflows/_conversation_state.py b/python/packages/core/agent_framework/_workflows/_conversation_state.py
index 8c21513f6c..084cf9cda3 100644
--- a/python/packages/core/agent_framework/_workflows/_conversation_state.py
+++ b/python/packages/core/agent_framework/_workflows/_conversation_state.py
@@ -3,7 +3,7 @@
from collections.abc import Iterable
from typing import Any, cast
-from agent_framework import ChatMessage, Role
+from agent_framework import ChatMessage
from ._checkpoint_encoding import decode_checkpoint_value, encode_checkpoint_value
@@ -40,15 +40,13 @@ def decode_chat_messages(payload: Iterable[dict[str, Any]]) -> list[ChatMessage]
continue
role_value = decode_checkpoint_value(item.get("role"))
- if isinstance(role_value, Role):
+ if isinstance(role_value, str):
role = role_value
- elif isinstance(role_value, dict):
- role_dict = cast(dict[str, Any], role_value)
- role = Role.from_dict(role_dict)
- elif isinstance(role_value, str):
- role = Role(value=role_value)
+ elif isinstance(role_value, dict) and "value" in role_value:
+ # Handle legacy serialization format
+ role = role_value["value"]
else:
- role = Role.ASSISTANT
+ role = "assistant"
contents_field = item.get("contents", [])
contents: list[Any] = []
diff --git a/python/packages/core/agent_framework/_workflows/_group_chat.py b/python/packages/core/agent_framework/_workflows/_group_chat.py
index 3f92d9ebf2..4b25ca1b77 100644
--- a/python/packages/core/agent_framework/_workflows/_group_chat.py
+++ b/python/packages/core/agent_framework/_workflows/_group_chat.py
@@ -31,7 +31,7 @@
from .._agents import AgentProtocol, ChatAgent
from .._threads import AgentThread
-from .._types import ChatMessage, Role
+from .._types import ChatMessage
from ._agent_executor import AgentExecutor, AgentExecutorRequest, AgentExecutorResponse
from ._agent_utils import resolve_agent_id
from ._base_group_chat_orchestrator import (
@@ -424,7 +424,7 @@ async def _invoke_agent_helper(conversation: list[ChatMessage]) -> AgentOrchestr
])
)
# Prepend instruction as system message
- current_conversation.append(ChatMessage(role=Role.USER, text=instruction))
+ current_conversation.append(ChatMessage("user", [instruction]))
retry_attempts = self._retry_attempts
while True:
@@ -439,7 +439,7 @@ async def _invoke_agent_helper(conversation: list[ChatMessage]) -> AgentOrchestr
# We don't need the full conversation since the thread should maintain history
current_conversation = [
ChatMessage(
- role=Role.USER,
+ role="user",
text=f"Your input could not be parsed due to an error: {ex}. Please try again.",
)
]
@@ -782,7 +782,7 @@ def with_termination_condition(self, termination_condition: TerminationCondition
def stop_after_two_calls(conversation: list[ChatMessage]) -> bool:
- calls = sum(1 for msg in conversation if msg.role == Role.ASSISTANT and msg.author_name == "specialist")
+ calls = sum(1 for msg in conversation if msg.role == "assistant" and msg.author_name == "specialist")
return calls >= 2
diff --git a/python/packages/core/agent_framework/_workflows/_handoff.py b/python/packages/core/agent_framework/_workflows/_handoff.py
index e529e09111..875fdc36c8 100644
--- a/python/packages/core/agent_framework/_workflows/_handoff.py
+++ b/python/packages/core/agent_framework/_workflows/_handoff.py
@@ -42,7 +42,7 @@
from .._middleware import FunctionInvocationContext, FunctionMiddleware
from .._threads import AgentThread
from .._tools import FunctionTool, tool
-from .._types import AgentResponse, ChatMessage, Role
+from .._types import AgentResponse, ChatMessage
from ._agent_executor import AgentExecutor, AgentExecutorRequest, AgentExecutorResponse
from ._agent_utils import resolve_agent_id
from ._base_group_chat_orchestrator import TerminationCondition
@@ -162,7 +162,7 @@ def create_response(response: str | list[str] | ChatMessage | list[ChatMessage])
"""Create a HandoffAgentUserRequest from a simple text response."""
messages: list[ChatMessage] = []
if isinstance(response, str):
- messages.append(ChatMessage(role=Role.USER, text=response))
+ messages.append(ChatMessage("user", [response]))
elif isinstance(response, ChatMessage):
messages.append(response)
elif isinstance(response, list):
@@ -170,7 +170,7 @@ def create_response(response: str | list[str] | ChatMessage | list[ChatMessage])
if isinstance(item, ChatMessage):
messages.append(item)
elif isinstance(item, str):
- messages.append(ChatMessage(role=Role.USER, text=item))
+ messages.append(ChatMessage("user", [item]))
else:
raise TypeError("List items must be either str or ChatMessage instances")
else:
@@ -427,7 +427,7 @@ async def _run_agent_and_emit(self, ctx: WorkflowContext[AgentExecutorResponse,
# or a termination condition is met.
# This allows the agent to perform long-running tasks without returning control
# to the coordinator or user prematurely.
- self._cache.extend([ChatMessage(role=Role.USER, text=self._autonomous_mode_prompt)])
+ self._cache.extend([ChatMessage("user", [self._autonomous_mode_prompt])])
self._autonomous_mode_turns += 1
await self._run_agent_and_emit(ctx)
else:
diff --git a/python/packages/core/agent_framework/_workflows/_magentic.py b/python/packages/core/agent_framework/_workflows/_magentic.py
index eff87fd5f0..221f16bae6 100644
--- a/python/packages/core/agent_framework/_workflows/_magentic.py
+++ b/python/packages/core/agent_framework/_workflows/_magentic.py
@@ -18,7 +18,6 @@
AgentProtocol,
AgentResponse,
ChatMessage,
- Role,
)
from ._agent_executor import AgentExecutor, AgentExecutorRequest, AgentExecutorResponse
@@ -607,14 +606,14 @@ async def plan(self, magentic_context: MagenticContext) -> ChatMessage:
# Gather facts
facts_user = ChatMessage(
- role=Role.USER,
+ role="user",
text=self.task_ledger_facts_prompt.format(task=magentic_context.task),
)
facts_msg = await self._complete([*magentic_context.chat_history, facts_user])
# Create plan
plan_user = ChatMessage(
- role=Role.USER,
+ role="user",
text=self.task_ledger_plan_prompt.format(team=team_text),
)
plan_msg = await self._complete([*magentic_context.chat_history, facts_user, facts_msg, plan_user])
@@ -632,7 +631,7 @@ async def plan(self, magentic_context: MagenticContext) -> ChatMessage:
facts=facts_msg.text,
plan=plan_msg.text,
)
- return ChatMessage(role=Role.ASSISTANT, text=combined, author_name=MAGENTIC_MANAGER_NAME)
+ return ChatMessage("assistant", [combined], author_name=MAGENTIC_MANAGER_NAME)
async def replan(self, magentic_context: MagenticContext) -> ChatMessage:
"""Update facts and plan when stalling or looping has been detected."""
@@ -643,17 +642,19 @@ async def replan(self, magentic_context: MagenticContext) -> ChatMessage:
# Update facts
facts_update_user = ChatMessage(
- role=Role.USER,
- text=self.task_ledger_facts_update_prompt.format(
- task=magentic_context.task, old_facts=self.task_ledger.facts.text
- ),
+ "user",
+ [
+ self.task_ledger_facts_update_prompt.format(
+ task=magentic_context.task, old_facts=self.task_ledger.facts.text
+ )
+ ],
)
updated_facts = await self._complete([*magentic_context.chat_history, facts_update_user])
# Update plan
plan_update_user = ChatMessage(
- role=Role.USER,
- text=self.task_ledger_plan_update_prompt.format(team=team_text),
+ "user",
+ [self.task_ledger_plan_update_prompt.format(team=team_text)],
)
updated_plan = await self._complete([
*magentic_context.chat_history,
@@ -675,7 +676,7 @@ async def replan(self, magentic_context: MagenticContext) -> ChatMessage:
facts=updated_facts.text,
plan=updated_plan.text,
)
- return ChatMessage(role=Role.ASSISTANT, text=combined, author_name=MAGENTIC_MANAGER_NAME)
+ return ChatMessage("assistant", [combined], author_name=MAGENTIC_MANAGER_NAME)
async def create_progress_ledger(self, magentic_context: MagenticContext) -> MagenticProgressLedger:
"""Use the model to produce a JSON progress ledger based on the conversation so far.
@@ -695,7 +696,7 @@ async def create_progress_ledger(self, magentic_context: MagenticContext) -> Mag
team=team_text,
names=names_csv,
)
- user_message = ChatMessage(role=Role.USER, text=prompt)
+ user_message = ChatMessage("user", [prompt])
# Include full context to help the model decide current stage, with small retry loop
attempts = 0
@@ -722,11 +723,11 @@ async def create_progress_ledger(self, magentic_context: MagenticContext) -> Mag
async def prepare_final_answer(self, magentic_context: MagenticContext) -> ChatMessage:
"""Ask the model to produce the final answer addressed to the user."""
prompt = self.final_answer_prompt.format(task=magentic_context.task)
- user_message = ChatMessage(role=Role.USER, text=prompt)
+ user_message = ChatMessage("user", [prompt])
response = await self._complete([*magentic_context.chat_history, user_message])
# Ensure role is assistant
return ChatMessage(
- role=Role.ASSISTANT,
+ role="assistant",
text=response.text,
author_name=response.author_name or MAGENTIC_MANAGER_NAME,
)
@@ -812,11 +813,11 @@ def approve() -> "MagenticPlanReviewResponse":
def revise(feedback: str | list[str] | ChatMessage | list[ChatMessage]) -> "MagenticPlanReviewResponse":
"""Create a revision response with feedback."""
if isinstance(feedback, str):
- feedback = [ChatMessage(role=Role.USER, text=feedback)]
+ feedback = [ChatMessage("user", [feedback])]
elif isinstance(feedback, ChatMessage):
feedback = [feedback]
elif isinstance(feedback, list):
- feedback = [ChatMessage(role=Role.USER, text=item) if isinstance(item, str) else item for item in feedback]
+ feedback = [ChatMessage("user", [item]) if isinstance(item, str) else item for item in feedback]
return MagenticPlanReviewResponse(review=feedback)
@@ -1118,7 +1119,7 @@ async def _run_inner_loop_helper(
# Add instruction to conversation (assistant guidance)
instruction_msg = ChatMessage(
- role=Role.ASSISTANT,
+ role="assistant",
text=str(instruction),
author_name=MAGENTIC_MANAGER_NAME,
)
@@ -1227,7 +1228,7 @@ async def _check_within_limits_or_complete(self, ctx: WorkflowContext[Never, lis
await ctx.yield_output([
*self._magentic_context.chat_history,
ChatMessage(
- role=Role.ASSISTANT,
+ role="assistant",
text=f"Workflow terminated due to reaching maximum {limit_type} count.",
author_name=MAGENTIC_MANAGER_NAME,
),
@@ -1810,7 +1811,7 @@ def with_manager(
class MyManager(MagenticManagerBase):
async def plan(self, context: MagenticContext) -> ChatMessage:
# Custom planning logic
- return ChatMessage(role=Role.ASSISTANT, text="...")
+ return ChatMessage("assistant", ["..."])
manager = MyManager()
diff --git a/python/packages/core/agent_framework/_workflows/_message_utils.py b/python/packages/core/agent_framework/_workflows/_message_utils.py
index ad4a9b55f6..78a2f3f626 100644
--- a/python/packages/core/agent_framework/_workflows/_message_utils.py
+++ b/python/packages/core/agent_framework/_workflows/_message_utils.py
@@ -4,7 +4,7 @@
from collections.abc import Sequence
-from agent_framework import ChatMessage, Role
+from agent_framework import ChatMessage
def normalize_messages_input(
@@ -22,7 +22,7 @@ def normalize_messages_input(
return []
if isinstance(messages, str):
- return [ChatMessage(role=Role.USER, text=messages)]
+ return [ChatMessage("user", [messages])]
if isinstance(messages, ChatMessage):
return [messages]
@@ -30,7 +30,7 @@ def normalize_messages_input(
normalized: list[ChatMessage] = []
for item in messages:
if isinstance(item, str):
- normalized.append(ChatMessage(role=Role.USER, text=item))
+ normalized.append(ChatMessage("user", [item]))
elif isinstance(item, ChatMessage):
normalized.append(item)
else:
diff --git a/python/packages/core/agent_framework/_workflows/_orchestration_request_info.py b/python/packages/core/agent_framework/_workflows/_orchestration_request_info.py
index dc1e282a12..cc4b1ed15d 100644
--- a/python/packages/core/agent_framework/_workflows/_orchestration_request_info.py
+++ b/python/packages/core/agent_framework/_workflows/_orchestration_request_info.py
@@ -3,7 +3,7 @@
from dataclasses import dataclass
from .._agents import AgentProtocol
-from .._types import ChatMessage, Role
+from .._types import ChatMessage
from ._agent_executor import AgentExecutor, AgentExecutorRequest, AgentExecutorResponse
from ._agent_utils import resolve_agent_id
from ._executor import Executor, handler
@@ -72,7 +72,7 @@ def from_strings(texts: list[str]) -> "AgentRequestInfoResponse":
Returns:
AgentRequestInfoResponse instance.
"""
- return AgentRequestInfoResponse(messages=[ChatMessage(role=Role.USER, text=text) for text in texts])
+ return AgentRequestInfoResponse(messages=[ChatMessage("user", [text]) for text in texts])
@staticmethod
def approve() -> "AgentRequestInfoResponse":
diff --git a/python/packages/core/agent_framework/_workflows/_orchestrator_helpers.py b/python/packages/core/agent_framework/_workflows/_orchestrator_helpers.py
index 82f6532ea2..0d74f53c39 100644
--- a/python/packages/core/agent_framework/_workflows/_orchestrator_helpers.py
+++ b/python/packages/core/agent_framework/_workflows/_orchestrator_helpers.py
@@ -8,7 +8,7 @@
import logging
-from .._types import ChatMessage, Role
+from .._types import ChatMessage
logger = logging.getLogger(__name__)
@@ -24,7 +24,7 @@ def clean_conversation_for_handoff(conversation: list[ChatMessage]) -> list[Chat
Removes:
- function_approval_request and function_call from assistant messages
- - Tool response messages (Role.TOOL)
+ - Tool response messages (role="tool")
- Messages with only tool calls and no text
Preserves:
@@ -40,7 +40,7 @@ def clean_conversation_for_handoff(conversation: list[ChatMessage]) -> list[Chat
cleaned: list[ChatMessage] = []
for msg in conversation:
# Skip tool response messages entirely
- if msg.role == Role.TOOL:
+ if msg.role == "tool":
continue
# Check for tool-related content
@@ -85,11 +85,11 @@ def create_completion_message(
reason: Reason for completion (for default text generation)
Returns:
- ChatMessage with ASSISTANT role
+ ChatMessage with assistant role
"""
message_text = text or f"Conversation {reason}."
return ChatMessage(
- role=Role.ASSISTANT,
- text=message_text,
+ "assistant",
+ [message_text],
author_name=author_name,
)
diff --git a/python/packages/core/agent_framework/_workflows/_workflow.py b/python/packages/core/agent_framework/_workflows/_workflow.py
index bd14dc6bcc..dfd0331282 100644
--- a/python/packages/core/agent_framework/_workflows/_workflow.py
+++ b/python/packages/core/agent_framework/_workflows/_workflow.py
@@ -851,7 +851,7 @@ def as_agent(self, name: str | None = None) -> WorkflowAgent:
The returned agent converts standard agent inputs (strings, ChatMessage, or lists of these)
into a list[ChatMessage] that is passed to the workflow's start executor. This conversion
happens in WorkflowAgent._normalize_messages() which transforms:
- - str -> [ChatMessage(role=USER, text=str)]
+    - str -> [ChatMessage("user", [str])]
- ChatMessage -> [ChatMessage]
- list[str | ChatMessage] -> list[ChatMessage] (with string elements converted)
diff --git a/python/packages/core/agent_framework/observability.py b/python/packages/core/agent_framework/observability.py
index 2d294daddd..8e2d736c42 100644
--- a/python/packages/core/agent_framework/observability.py
+++ b/python/packages/core/agent_framework/observability.py
@@ -41,7 +41,6 @@
ChatResponse,
ChatResponseUpdate,
Content,
- FinishReason,
)
__all__ = [
@@ -1211,7 +1210,7 @@ async def trace_get_streaming_response(
duration = (end_time_stamp or perf_counter()) - start_time_stamp
from ._types import ChatResponse
- response = ChatResponse.from_chat_response_updates(all_updates)
+ response = ChatResponse.from_updates(all_updates)
attributes = _get_response_attributes(attributes, response, duration=duration)
_capture_response(
span=span,
@@ -1450,7 +1449,7 @@ async def trace_run_streaming(
capture_exception(span=span, exception=exception, timestamp=time_ns())
raise
else:
- response = AgentResponse.from_agent_run_response_updates(all_updates)
+ response = AgentResponse.from_updates(all_updates)
attributes = _get_response_attributes(attributes, response, capture_usage=capture_usage)
_capture_response(span=span, attributes=attributes)
if OBSERVABILITY_SETTINGS.SENSITIVE_DATA_ENABLED and response.messages:
@@ -1715,7 +1714,7 @@ def _capture_messages(
messages: "str | ChatMessage | list[str] | list[ChatMessage]",
system_instructions: str | list[str] | None = None,
output: bool = False,
- finish_reason: "FinishReason | None" = None,
+ finish_reason: str | None = None,
) -> None:
"""Log messages with extra information."""
from ._types import prepare_messages
@@ -1730,13 +1729,13 @@ def _capture_messages(
logger.info(
otel_message,
extra={
- OtelAttr.EVENT_NAME: OtelAttr.CHOICE if output else ROLE_EVENT_MAP.get(message.role.value),
+ OtelAttr.EVENT_NAME: OtelAttr.CHOICE if output else ROLE_EVENT_MAP.get(message.role),
OtelAttr.PROVIDER_NAME: provider_name,
ChatMessageListTimestampFilter.INDEX_KEY: index,
},
)
if finish_reason:
- otel_messages[-1]["finish_reason"] = FINISH_REASON_MAP[finish_reason.value]
+ otel_messages[-1]["finish_reason"] = FINISH_REASON_MAP[finish_reason]
span.set_attribute(OtelAttr.OUTPUT_MESSAGES if output else OtelAttr.INPUT_MESSAGES, json.dumps(otel_messages))
if system_instructions:
if not isinstance(system_instructions, list):
@@ -1747,7 +1746,7 @@ def _capture_messages(
def _to_otel_message(message: "ChatMessage") -> dict[str, Any]:
"""Create a otel representation of a message."""
- return {"role": message.role.value, "parts": [_to_otel_part(content) for content in message.contents]}
+ return {"role": message.role, "parts": [_to_otel_part(content) for content in message.contents]}
def _to_otel_part(content: "Content") -> dict[str, Any] | None:
@@ -1806,7 +1805,9 @@ def _get_response_attributes(
getattr(response.raw_representation, "finish_reason", None) if response.raw_representation else None
)
if finish_reason:
- attributes[OtelAttr.FINISH_REASONS] = json.dumps([finish_reason.value])
+ # Handle both string and object with .value attribute for backward compatibility
+ finish_reason_str = finish_reason.value if hasattr(finish_reason, "value") else finish_reason
+ attributes[OtelAttr.FINISH_REASONS] = json.dumps([finish_reason_str])
if model_id := getattr(response, "model_id", None):
attributes[SpanAttributes.LLM_RESPONSE_MODEL] = model_id
if capture_usage and (usage := response.usage_details):
diff --git a/python/packages/core/agent_framework/openai/_assistants_client.py b/python/packages/core/agent_framework/openai/_assistants_client.py
index 22852bea53..f653e22d42 100644
--- a/python/packages/core/agent_framework/openai/_assistants_client.py
+++ b/python/packages/core/agent_framework/openai/_assistants_client.py
@@ -41,7 +41,6 @@
ChatResponse,
ChatResponseUpdate,
Content,
- Role,
UsageDetails,
prepare_function_call_results,
)
@@ -345,7 +344,7 @@ async def _inner_get_response(
options: dict[str, Any],
**kwargs: Any,
) -> ChatResponse:
- return await ChatResponse.from_chat_response_generator(
+ return await ChatResponse.from_update_generator(
updates=self._inner_get_streaming_response(messages=messages, options=options, **kwargs),
output_format_type=options.get("response_format"),
)
@@ -479,19 +478,19 @@ async def _process_stream_events(self, stream: Any, thread_id: str) -> AsyncIter
message_id=response_id,
raw_representation=response.data,
response_id=response_id,
- role=Role.ASSISTANT,
+ role="assistant",
)
elif response.event == "thread.run.step.created" and isinstance(response.data, RunStep):
response_id = response.data.run_id
elif response.event == "thread.message.delta" and isinstance(response.data, MessageDeltaEvent):
delta = response.data.delta
- role = Role.USER if delta.role == "user" else Role.ASSISTANT
+ role = "user" if delta.role == "user" else "assistant"
for delta_block in delta.content or []:
if isinstance(delta_block, TextDeltaBlock) and delta_block.text and delta_block.text.value:
yield ChatResponseUpdate(
role=role,
- text=delta_block.text.value,
+ contents=[Content.from_text(text=delta_block.text.value)],
conversation_id=thread_id,
message_id=response_id,
raw_representation=response.data,
@@ -501,7 +500,7 @@ async def _process_stream_events(self, stream: Any, thread_id: str) -> AsyncIter
contents = self._parse_function_calls_from_assistants(response.data, response_id)
if contents:
yield ChatResponseUpdate(
- role=Role.ASSISTANT,
+ role="assistant",
contents=contents,
conversation_id=thread_id,
message_id=response_id,
@@ -522,7 +521,7 @@ async def _process_stream_events(self, stream: Any, thread_id: str) -> AsyncIter
)
)
yield ChatResponseUpdate(
- role=Role.ASSISTANT,
+ role="assistant",
contents=[usage_content],
conversation_id=thread_id,
message_id=response_id,
@@ -536,7 +535,7 @@ async def _process_stream_events(self, stream: Any, thread_id: str) -> AsyncIter
message_id=response_id,
raw_representation=response.data,
response_id=response_id,
- role=Role.ASSISTANT,
+ role="assistant",
)
def _parse_function_calls_from_assistants(self, event_data: Run, response_id: str | None) -> list[Content]:
@@ -670,7 +669,7 @@ def _prepare_options(
# since there is no such message roles in OpenAI Assistants.
# All other messages are added 1:1.
for chat_message in messages:
- if chat_message.role.value in ["system", "developer"]:
+ if chat_message.role in ["system", "developer"]:
for text_content in [content for content in chat_message.contents if content.type == "text"]:
text = getattr(text_content, "text", None)
if text:
@@ -697,7 +696,7 @@ def _prepare_options(
additional_messages = []
additional_messages.append(
AdditionalMessage(
- role="assistant" if chat_message.role == Role.ASSISTANT else "user",
+ role="assistant" if chat_message.role == "assistant" else "user",
content=message_contents,
)
)
diff --git a/python/packages/core/agent_framework/openai/_chat_client.py b/python/packages/core/agent_framework/openai/_chat_client.py
index e70b4790f6..1a0529f50f 100644
--- a/python/packages/core/agent_framework/openai/_chat_client.py
+++ b/python/packages/core/agent_framework/openai/_chat_client.py
@@ -26,8 +26,6 @@
ChatResponse,
ChatResponseUpdate,
Content,
- FinishReason,
- Role,
UsageDetails,
prepare_function_call_results,
)
@@ -285,11 +283,11 @@ def _parse_response_from_openai(self, response: ChatCompletion, options: dict[st
"""Parse a response from OpenAI into a ChatResponse."""
response_metadata = self._get_metadata_from_chat_response(response)
messages: list[ChatMessage] = []
- finish_reason: FinishReason | None = None
+ finish_reason: str | None = None
for choice in response.choices:
response_metadata.update(self._get_metadata_from_chat_choice(choice))
if choice.finish_reason:
- finish_reason = FinishReason(value=choice.finish_reason)
+ finish_reason = choice.finish_reason
contents: list[Content] = []
if text_content := self._parse_text_from_openai(choice):
contents.append(text_content)
@@ -297,7 +295,7 @@ def _parse_response_from_openai(self, response: ChatCompletion, options: dict[st
contents.extend(parsed_tool_calls)
if reasoning_details := getattr(choice.message, "reasoning_details", None):
contents.append(Content.from_text_reasoning(protected_data=json.dumps(reasoning_details)))
- messages.append(ChatMessage(role="assistant", contents=contents))
+ messages.append(ChatMessage("assistant", contents))
return ChatResponse(
response_id=response.id,
created_at=datetime.fromtimestamp(response.created, tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
@@ -317,7 +315,7 @@ def _parse_response_update_from_openai(
chunk_metadata = self._get_metadata_from_streaming_chat_response(chunk)
if chunk.usage:
return ChatResponseUpdate(
- role=Role.ASSISTANT,
+ role="assistant",
contents=[
Content.from_usage(
usage_details=self._parse_usage_from_openai(chunk.usage), raw_representation=chunk
@@ -329,12 +327,12 @@ def _parse_response_update_from_openai(
message_id=chunk.id,
)
contents: list[Content] = []
- finish_reason: FinishReason | None = None
+ finish_reason: str | None = None
for choice in chunk.choices:
chunk_metadata.update(self._get_metadata_from_chat_choice(choice))
contents.extend(self._parse_tool_calls_from_openai(choice))
if choice.finish_reason:
- finish_reason = FinishReason(value=choice.finish_reason)
+ finish_reason = choice.finish_reason
if text_content := self._parse_text_from_openai(choice):
contents.append(text_content)
@@ -343,7 +341,7 @@ def _parse_response_update_from_openai(
return ChatResponseUpdate(
created_at=datetime.fromtimestamp(chunk.created, tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
contents=contents,
- role=Role.ASSISTANT,
+ role="assistant",
model_id=chunk.model,
additional_properties=chunk_metadata,
finish_reason=finish_reason,
@@ -430,7 +428,7 @@ def _prepare_messages_for_openai(
Allowing customization of the key names for role/author, and optionally overriding the role.
- Role.TOOL messages need to be formatted different than system/user/assistant messages:
+        - "tool" messages need to be formatted differently from system/user/assistant messages:
They require a "tool_call_id" and (function) "name" key, and the "metadata" key should
be removed. The "encoding" key should also be removed.
@@ -459,9 +457,9 @@ def _prepare_message_for_openai(self, message: ChatMessage) -> list[dict[str, An
continue
args: dict[str, Any] = {
- "role": message.role.value if isinstance(message.role, Role) else message.role,
+ "role": message.role,
}
- if message.author_name and message.role != Role.TOOL:
+ if message.author_name and message.role != "tool":
args["name"] = message.author_name
if "reasoning_details" in message.additional_properties and (
details := message.additional_properties["reasoning_details"]
diff --git a/python/packages/core/agent_framework/openai/_responses_client.py b/python/packages/core/agent_framework/openai/_responses_client.py
index 9a3436e5ce..125ff1cd20 100644
--- a/python/packages/core/agent_framework/openai/_responses_client.py
+++ b/python/packages/core/agent_framework/openai/_responses_client.py
@@ -54,7 +54,6 @@
ChatResponse,
ChatResponseUpdate,
Content,
- Role,
TextSpanRegion,
UsageDetails,
detect_media_type_from_base64,
@@ -610,7 +609,7 @@ def _prepare_messages_for_openai(self, chat_messages: Sequence[ChatMessage]) ->
Allowing customization of the key names for role/author, and optionally overriding the role.
- Role.TOOL messages need to be formatted different than system/user/assistant messages:
+        - "tool" messages need to be formatted differently from system/user/assistant messages:
They require a "tool_call_id" and (function) "name" key, and the "metadata" key should
be removed. The "encoding" key should also be removed.
@@ -643,7 +642,7 @@ def _prepare_message_for_openai(
"""Prepare a chat message for the OpenAI Responses API format."""
all_messages: list[dict[str, Any]] = []
args: dict[str, Any] = {
- "role": message.role.value if isinstance(message.role, Role) else message.role,
+ "role": message.role,
}
for content in message.contents:
match content.type:
@@ -669,7 +668,7 @@ def _prepare_message_for_openai(
def _prepare_content_for_openai(
self,
- role: Role,
+ role: str,
content: Content,
call_id_to_id: dict[str, str],
) -> dict[str, Any]:
@@ -677,7 +676,7 @@ def _prepare_content_for_openai(
match content.type:
case "text":
return {
- "type": "output_text" if role == Role.ASSISTANT else "input_text",
+ "type": "output_text" if role == "assistant" else "input_text",
"text": content.text,
}
case "text_reasoning":
@@ -1027,7 +1026,7 @@ def _parse_response_from_openai(
)
case _:
logger.debug("Unparsed output of type: %s: %s", item.type, item)
- response_message = ChatMessage(role="assistant", contents=contents)
+ response_message = ChatMessage("assistant", contents)
args: dict[str, Any] = {
"response_id": response.id,
"created_at": datetime.fromtimestamp(response.created_at, tz=timezone.utc).strftime(
@@ -1387,7 +1386,7 @@ def _get_ann_value(key: str) -> Any:
contents=contents,
conversation_id=conversation_id,
response_id=response_id,
- role=Role.ASSISTANT,
+ role="assistant",
model_id=model,
additional_properties=metadata,
raw_representation=event,
diff --git a/python/packages/core/tests/azure/test_azure_assistants_client.py b/python/packages/core/tests/azure/test_azure_assistants_client.py
index 32f1b13252..0187e98ddc 100644
--- a/python/packages/core/tests/azure/test_azure_assistants_client.py
+++ b/python/packages/core/tests/azure/test_azure_assistants_client.py
@@ -277,7 +277,7 @@ async def test_azure_assistants_client_get_response() -> None:
"It's a beautiful day for outdoor activities.",
)
)
- messages.append(ChatMessage(role="user", text="What's the weather like today?"))
+ messages.append(ChatMessage("user", ["What's the weather like today?"]))
# Test that the client can be used to get a response
response = await azure_assistants_client.get_response(messages=messages)
@@ -295,7 +295,7 @@ async def test_azure_assistants_client_get_response_tools() -> None:
assert isinstance(azure_assistants_client, ChatClientProtocol)
messages: list[ChatMessage] = []
- messages.append(ChatMessage(role="user", text="What's the weather like in Seattle?"))
+ messages.append(ChatMessage("user", ["What's the weather like in Seattle?"]))
# Test that the client can be used to get a response
response = await azure_assistants_client.get_response(
@@ -323,7 +323,7 @@ async def test_azure_assistants_client_streaming() -> None:
"It's a beautiful day for outdoor activities.",
)
)
- messages.append(ChatMessage(role="user", text="What's the weather like today?"))
+ messages.append(ChatMessage("user", ["What's the weather like today?"]))
# Test that the client can be used to get a response
response = azure_assistants_client.get_streaming_response(messages=messages)
@@ -347,7 +347,7 @@ async def test_azure_assistants_client_streaming_tools() -> None:
assert isinstance(azure_assistants_client, ChatClientProtocol)
messages: list[ChatMessage] = []
- messages.append(ChatMessage(role="user", text="What's the weather like in Seattle?"))
+ messages.append(ChatMessage("user", ["What's the weather like in Seattle?"]))
# Test that the client can be used to get a response
response = azure_assistants_client.get_streaming_response(
@@ -372,7 +372,7 @@ async def test_azure_assistants_client_with_existing_assistant() -> None:
# First create an assistant to use in the test
async with AzureOpenAIAssistantsClient(credential=AzureCliCredential()) as temp_client:
# Get the assistant ID by triggering assistant creation
- messages = [ChatMessage(role="user", text="Hello")]
+ messages = [ChatMessage("user", ["Hello"])]
await temp_client.get_response(messages=messages)
assistant_id = temp_client.assistant_id
@@ -383,7 +383,7 @@ async def test_azure_assistants_client_with_existing_assistant() -> None:
assert isinstance(azure_assistants_client, ChatClientProtocol)
assert azure_assistants_client.assistant_id == assistant_id
- messages = [ChatMessage(role="user", text="What can you do?")]
+ messages = [ChatMessage("user", ["What can you do?"])]
# Test that the client can be used to get a response
response = await azure_assistants_client.get_response(messages=messages)
diff --git a/python/packages/core/tests/azure/test_azure_chat_client.py b/python/packages/core/tests/azure/test_azure_chat_client.py
index caba327dc7..99df3bbdf5 100644
--- a/python/packages/core/tests/azure/test_azure_chat_client.py
+++ b/python/packages/core/tests/azure/test_azure_chat_client.py
@@ -665,7 +665,7 @@ async def test_azure_openai_chat_client_response() -> None:
"of climate change.",
)
)
- messages.append(ChatMessage(role="user", text="who are Emily and David?"))
+ messages.append(ChatMessage("user", ["who are Emily and David?"]))
# Test that the client can be used to get a response
response = await azure_chat_client.get_response(messages=messages)
@@ -686,7 +686,7 @@ async def test_azure_openai_chat_client_response_tools() -> None:
assert isinstance(azure_chat_client, ChatClientProtocol)
messages: list[ChatMessage] = []
- messages.append(ChatMessage(role="user", text="who are Emily and David?"))
+ messages.append(ChatMessage("user", ["who are Emily and David?"]))
# Test that the client can be used to get a response
response = await azure_chat_client.get_response(
@@ -716,7 +716,7 @@ async def test_azure_openai_chat_client_streaming() -> None:
"of climate change.",
)
)
- messages.append(ChatMessage(role="user", text="who are Emily and David?"))
+ messages.append(ChatMessage("user", ["who are Emily and David?"]))
# Test that the client can be used to get a response
response = azure_chat_client.get_streaming_response(messages=messages)
@@ -742,7 +742,7 @@ async def test_azure_openai_chat_client_streaming_tools() -> None:
assert isinstance(azure_chat_client, ChatClientProtocol)
messages: list[ChatMessage] = []
- messages.append(ChatMessage(role="user", text="who are Emily and David?"))
+ messages.append(ChatMessage("user", ["who are Emily and David?"]))
# Test that the client can be used to get a response
response = azure_chat_client.get_streaming_response(
diff --git a/python/packages/core/tests/azure/test_azure_responses_client.py b/python/packages/core/tests/azure/test_azure_responses_client.py
index 35d92c7b98..13dfee819d 100644
--- a/python/packages/core/tests/azure/test_azure_responses_client.py
+++ b/python/packages/core/tests/azure/test_azure_responses_client.py
@@ -221,14 +221,14 @@ async def test_integration_options(
# Prepare test message
if option_name == "tools" or option_name == "tool_choice":
# Use weather-related prompt for tool tests
- messages = [ChatMessage(role="user", text="What is the weather in Seattle?")]
+ messages = [ChatMessage("user", ["What is the weather in Seattle?"])]
elif option_name == "response_format":
# Use prompt that works well with structured output
- messages = [ChatMessage(role="user", text="The weather in Seattle is sunny")]
- messages.append(ChatMessage(role="user", text="What is the weather in Seattle?"))
+ messages = [ChatMessage("user", ["The weather in Seattle is sunny"])]
+ messages.append(ChatMessage("user", ["What is the weather in Seattle?"]))
else:
# Generic prompt for simple options
- messages = [ChatMessage(role="user", text="Say 'Hello World' briefly.")]
+ messages = [ChatMessage("user", ["Say 'Hello World' briefly."])]
# Build options dict
options: dict[str, Any] = {option_name: option_value}
@@ -245,7 +245,7 @@ async def test_integration_options(
)
output_format = option_value if option_name == "response_format" else None
- response = await ChatResponse.from_chat_response_generator(response_gen, output_format_type=output_format)
+ response = await ChatResponse.from_update_generator(response_gen, output_format_type=output_format)
else:
# Test non-streaming mode
response = await client.get_response(
@@ -293,7 +293,7 @@ async def test_integration_web_search() -> None:
},
}
if streaming:
- response = await ChatResponse.from_chat_response_generator(client.get_streaming_response(**content))
+ response = await ChatResponse.from_update_generator(client.get_streaming_response(**content))
else:
response = await client.get_response(**content)
@@ -318,7 +318,7 @@ async def test_integration_web_search() -> None:
},
}
if streaming:
- response = await ChatResponse.from_chat_response_generator(client.get_streaming_response(**content))
+ response = await ChatResponse.from_update_generator(client.get_streaming_response(**content))
else:
response = await client.get_response(**content)
assert response.text is not None
@@ -367,7 +367,7 @@ async def test_integration_client_file_search_streaming() -> None:
)
assert response is not None
- full_response = await ChatResponse.from_chat_response_generator(response)
+ full_response = await ChatResponse.from_update_generator(response)
assert "sunny" in full_response.text.lower()
assert "75" in full_response.text
finally:
diff --git a/python/packages/core/tests/core/conftest.py b/python/packages/core/tests/core/conftest.py
index ed8de28c11..c5b7be9687 100644
--- a/python/packages/core/tests/core/conftest.py
+++ b/python/packages/core/tests/core/conftest.py
@@ -21,7 +21,6 @@
ChatResponse,
ChatResponseUpdate,
Content,
- Role,
ToolProtocol,
tool,
use_chat_middleware,
@@ -95,7 +94,7 @@ async def get_response(
self.call_count += 1
if self.responses:
return self.responses.pop(0)
- return ChatResponse(messages=ChatMessage(role="assistant", text="test response"))
+ return ChatResponse(messages=ChatMessage("assistant", ["test response"]))
async def get_streaming_response(
self,
@@ -108,7 +107,7 @@ async def get_streaming_response(
for update in self.streaming_responses.pop(0):
yield update
else:
- yield ChatResponseUpdate(text=Content.from_text(text="test streaming response "), role="assistant")
+ yield ChatResponseUpdate(contents=[Content.from_text(text="test streaming response ")], role="assistant")
yield ChatResponseUpdate(contents=[Content.from_text(text="another update")], role="assistant")
@@ -143,7 +142,7 @@ async def _inner_get_response(
logger.debug(f"Running base chat client inner, with: {messages=}, {options=}, {kwargs=}")
self.call_count += 1
if not self.run_responses:
- return ChatResponse(messages=ChatMessage(role="assistant", text=f"test response - {messages[-1].text}"))
+ return ChatResponse(messages=ChatMessage("assistant", [f"test response - {messages[-1].text}"]))
response = self.run_responses.pop(0)
@@ -168,10 +167,14 @@ async def _inner_get_streaming_response(
) -> AsyncIterable[ChatResponseUpdate]:
logger.debug(f"Running base chat client inner stream, with: {messages=}, {options=}, {kwargs=}")
if not self.streaming_responses:
- yield ChatResponseUpdate(text=f"update - {messages[0].text}", role="assistant")
+ yield ChatResponseUpdate(
+ contents=[Content.from_text(text=f"update - {messages[0].text}")], role="assistant"
+ )
return
if options.get("tool_choice") == "none":
- yield ChatResponseUpdate(text="I broke out of the function invocation loop...", role="assistant")
+ yield ChatResponseUpdate(
+ contents=[Content.from_text(text="I broke out of the function invocation loop...")], role="assistant"
+ )
return
response = self.streaming_responses.pop(0)
for update in response:
@@ -233,7 +236,7 @@ async def run(
**kwargs: Any,
) -> AgentResponse:
logger.debug(f"Running mock agent, with: {messages=}, {thread=}, {kwargs=}")
- return AgentResponse(messages=[ChatMessage(role=Role.ASSISTANT, contents=[Content.from_text("Response")])])
+ return AgentResponse(messages=[ChatMessage("assistant", [Content.from_text("Response")])])
async def run_stream(
self,
diff --git a/python/packages/core/tests/core/test_agents.py b/python/packages/core/tests/core/test_agents.py
index 1f4d1cadce..09ef1bbbe1 100644
--- a/python/packages/core/tests/core/test_agents.py
+++ b/python/packages/core/tests/core/test_agents.py
@@ -24,7 +24,6 @@
Context,
ContextProvider,
HostedCodeInterpreterTool,
- Role,
ToolProtocol,
tool,
)
@@ -43,7 +42,7 @@ def test_agent_type(agent: AgentProtocol) -> None:
async def test_agent_run(agent: AgentProtocol) -> None:
response = await agent.run("test")
- assert response.messages[0].role == Role.ASSISTANT
+ assert response.messages[0].role == "assistant"
assert response.messages[0].text == "Response"
@@ -104,12 +103,12 @@ async def test_chat_client_agent_get_new_thread(chat_client: ChatClientProtocol)
async def test_chat_client_agent_prepare_thread_and_messages(chat_client: ChatClientProtocol) -> None:
agent = ChatAgent(chat_client=chat_client)
- message = ChatMessage(role=Role.USER, text="Hello")
+ message = ChatMessage("user", ["Hello"])
thread = AgentThread(message_store=ChatMessageStore(messages=[message]))
_, _, result_messages = await agent._prepare_thread_and_messages( # type: ignore[reportPrivateUsage]
thread=thread,
- input_messages=[ChatMessage(role=Role.USER, text="Test")],
+ input_messages=[ChatMessage("user", ["Test"])],
)
assert len(result_messages) == 2
@@ -127,7 +126,7 @@ async def test_prepare_thread_does_not_mutate_agent_chat_options(chat_client: Ch
_, prepared_chat_options, _ = await agent._prepare_thread_and_messages( # type: ignore[reportPrivateUsage]
thread=thread,
- input_messages=[ChatMessage(role=Role.USER, text="Test")],
+ input_messages=[ChatMessage("user", ["Test"])],
)
assert prepared_chat_options.get("tools") is not None
@@ -139,7 +138,7 @@ async def test_prepare_thread_does_not_mutate_agent_chat_options(chat_client: Ch
async def test_chat_client_agent_update_thread_id(chat_client_base: ChatClientProtocol) -> None:
mock_response = ChatResponse(
- messages=[ChatMessage(role=Role.ASSISTANT, contents=[Content.from_text("test response")])],
+ messages=[ChatMessage("assistant", [Content.from_text("test response")])],
conversation_id="123",
)
chat_client_base.run_responses = [mock_response]
@@ -202,11 +201,7 @@ async def test_chat_client_agent_author_name_as_agent_name(chat_client: ChatClie
async def test_chat_client_agent_author_name_is_used_from_response(chat_client_base: ChatClientProtocol) -> None:
chat_client_base.run_responses = [
ChatResponse(
- messages=[
- ChatMessage(
- role=Role.ASSISTANT, contents=[Content.from_text("test response")], author_name="TestAuthor"
- )
- ]
+ messages=[ChatMessage("assistant", [Content.from_text("test response")], author_name="TestAuthor")]
)
]
@@ -256,7 +251,7 @@ async def invoking(self, messages: ChatMessage | MutableSequence[ChatMessage], *
async def test_chat_agent_context_providers_model_invoking(chat_client: ChatClientProtocol) -> None:
"""Test that context providers' invoking is called during agent run."""
- mock_provider = MockContextProvider(messages=[ChatMessage(role=Role.SYSTEM, text="Test context instructions")])
+ mock_provider = MockContextProvider(messages=[ChatMessage("system", ["Test context instructions"])])
agent = ChatAgent(chat_client=chat_client, context_provider=mock_provider)
await agent.run("Hello")
@@ -269,7 +264,7 @@ async def test_chat_agent_context_providers_thread_created(chat_client_base: Cha
mock_provider = MockContextProvider()
chat_client_base.run_responses = [
ChatResponse(
- messages=[ChatMessage(role=Role.ASSISTANT, contents=[Content.from_text("test response")])],
+ messages=[ChatMessage("assistant", [Content.from_text("test response")])],
conversation_id="test-thread-id",
)
]
@@ -296,19 +291,19 @@ async def test_chat_agent_context_providers_messages_adding(chat_client: ChatCli
async def test_chat_agent_context_instructions_in_messages(chat_client: ChatClientProtocol) -> None:
"""Test that AI context instructions are included in messages."""
- mock_provider = MockContextProvider(messages=[ChatMessage(role="system", text="Context-specific instructions")])
+ mock_provider = MockContextProvider(messages=[ChatMessage("system", ["Context-specific instructions"])])
agent = ChatAgent(chat_client=chat_client, instructions="Agent instructions", context_provider=mock_provider)
# We need to test the _prepare_thread_and_messages method directly
_, _, messages = await agent._prepare_thread_and_messages( # type: ignore[reportPrivateUsage]
- thread=None, input_messages=[ChatMessage(role=Role.USER, text="Hello")]
+ thread=None, input_messages=[ChatMessage("user", ["Hello"])]
)
# Should have context instructions, and user message
assert len(messages) == 2
- assert messages[0].role == Role.SYSTEM
+ assert messages[0].role == "system"
assert messages[0].text == "Context-specific instructions"
- assert messages[1].role == Role.USER
+ assert messages[1].role == "user"
assert messages[1].text == "Hello"
# instructions system message is added by a chat_client
@@ -319,18 +314,18 @@ async def test_chat_agent_no_context_instructions(chat_client: ChatClientProtoco
agent = ChatAgent(chat_client=chat_client, instructions="Agent instructions", context_provider=mock_provider)
_, _, messages = await agent._prepare_thread_and_messages( # type: ignore[reportPrivateUsage]
- thread=None, input_messages=[ChatMessage(role=Role.USER, text="Hello")]
+ thread=None, input_messages=[ChatMessage("user", ["Hello"])]
)
# Should have agent instructions and user message only
assert len(messages) == 1
- assert messages[0].role == Role.USER
+ assert messages[0].role == "user"
assert messages[0].text == "Hello"
async def test_chat_agent_run_stream_context_providers(chat_client: ChatClientProtocol) -> None:
"""Test that context providers work with run_stream method."""
- mock_provider = MockContextProvider(messages=[ChatMessage(role=Role.SYSTEM, text="Stream context instructions")])
+ mock_provider = MockContextProvider(messages=[ChatMessage("system", ["Stream context instructions"])])
agent = ChatAgent(chat_client=chat_client, context_provider=mock_provider)
# Collect all stream updates
@@ -350,7 +345,7 @@ async def test_chat_agent_context_providers_with_thread_service_id(chat_client_b
mock_provider = MockContextProvider()
chat_client_base.run_responses = [
ChatResponse(
- messages=[ChatMessage(role=Role.ASSISTANT, contents=[Content.from_text("test response")])],
+ messages=[ChatMessage("assistant", [Content.from_text("test response")])],
conversation_id="service-thread-123",
)
]
@@ -585,7 +580,7 @@ def echo_thread_info(text: str, **kwargs: Any) -> str: # type: ignore[reportUnk
],
)
),
- ChatResponse(messages=ChatMessage(role="assistant", text="done")),
+ ChatResponse(messages=ChatMessage("assistant", ["done"])),
]
agent = ChatAgent(
@@ -928,7 +923,7 @@ async def invoking(self, messages, **kwargs):
# Run the agent and verify context tools are added
_, options, _ = await agent._prepare_thread_and_messages( # type: ignore[reportPrivateUsage]
- thread=None, input_messages=[ChatMessage(role=Role.USER, text="Hello")]
+ thread=None, input_messages=[ChatMessage("user", ["Hello"])]
)
# The context tools should now be in the options
@@ -952,7 +947,7 @@ async def invoking(self, messages, **kwargs):
# Run the agent and verify context instructions are available
_, options, _ = await agent._prepare_thread_and_messages( # type: ignore[reportPrivateUsage]
- thread=None, input_messages=[ChatMessage(role=Role.USER, text="Hello")]
+ thread=None, input_messages=[ChatMessage("user", ["Hello"])]
)
# The context instructions should now be in the options
@@ -972,7 +967,7 @@ async def test_chat_agent_raises_on_conversation_id_mismatch(chat_client_base: C
with pytest.raises(AgentExecutionException, match="conversation_id set on the agent is different"):
await agent._prepare_thread_and_messages( # type: ignore[reportPrivateUsage]
- thread=thread, input_messages=[ChatMessage(role=Role.USER, text="Hello")]
+ thread=thread, input_messages=[ChatMessage("user", ["Hello"])]
)
diff --git a/python/packages/core/tests/core/test_as_tool_kwargs_propagation.py b/python/packages/core/tests/core/test_as_tool_kwargs_propagation.py
index 39f441eb49..e3457f6625 100644
--- a/python/packages/core/tests/core/test_as_tool_kwargs_propagation.py
+++ b/python/packages/core/tests/core/test_as_tool_kwargs_propagation.py
@@ -28,7 +28,7 @@ async def capture_middleware(
# Setup mock response
chat_client.responses = [
- ChatResponse(messages=[ChatMessage(role="assistant", text="Response from sub-agent")]),
+ ChatResponse(messages=[ChatMessage("assistant", ["Response from sub-agent"])]),
]
# Create sub-agent with middleware
@@ -70,7 +70,7 @@ async def capture_middleware(
# Setup mock response
chat_client.responses = [
- ChatResponse(messages=[ChatMessage(role="assistant", text="Response from sub-agent")]),
+ ChatResponse(messages=[ChatMessage("assistant", ["Response from sub-agent"])]),
]
sub_agent = ChatAgent(
@@ -122,8 +122,8 @@ async def capture_middleware(
)
]
),
- ChatResponse(messages=[ChatMessage(role="assistant", text="Response from agent_c")]),
- ChatResponse(messages=[ChatMessage(role="assistant", text="Response from agent_b")]),
+ ChatResponse(messages=[ChatMessage("assistant", ["Response from agent_c"])]),
+ ChatResponse(messages=[ChatMessage("assistant", ["Response from agent_b"])]),
]
# Create agent C (bottom level)
@@ -173,7 +173,7 @@ async def capture_middleware(
from agent_framework import ChatResponseUpdate
chat_client.streaming_responses = [
- [ChatResponseUpdate(text=Content.from_text(text="Streaming response"), role="assistant")],
+ [ChatResponseUpdate(contents=[Content.from_text(text="Streaming response")], role="assistant")],
]
sub_agent = ChatAgent(
@@ -204,7 +204,7 @@ async def test_as_tool_empty_kwargs_still_works(self, chat_client: MockChatClien
"""Test that as_tool works correctly when no extra kwargs are provided."""
# Setup mock response
chat_client.responses = [
- ChatResponse(messages=[ChatMessage(role="assistant", text="Response from agent")]),
+ ChatResponse(messages=[ChatMessage("assistant", ["Response from agent"])]),
]
sub_agent = ChatAgent(
@@ -233,7 +233,7 @@ async def capture_middleware(
# Setup mock response
chat_client.responses = [
- ChatResponse(messages=[ChatMessage(role="assistant", text="Response with options")]),
+ ChatResponse(messages=[ChatMessage("assistant", ["Response with options"])]),
]
sub_agent = ChatAgent(
@@ -280,8 +280,8 @@ async def capture_middleware(
# Setup mock responses for both calls
chat_client.responses = [
- ChatResponse(messages=[ChatMessage(role="assistant", text="First response")]),
- ChatResponse(messages=[ChatMessage(role="assistant", text="Second response")]),
+ ChatResponse(messages=[ChatMessage("assistant", ["First response"])]),
+ ChatResponse(messages=[ChatMessage("assistant", ["Second response"])]),
]
sub_agent = ChatAgent(
@@ -327,7 +327,7 @@ async def capture_middleware(
# Setup mock response
chat_client.responses = [
- ChatResponse(messages=[ChatMessage(role="assistant", text="Response from sub-agent")]),
+ ChatResponse(messages=[ChatMessage("assistant", ["Response from sub-agent"])]),
]
sub_agent = ChatAgent(
diff --git a/python/packages/core/tests/core/test_clients.py b/python/packages/core/tests/core/test_clients.py
index 67ecd54a8d..c151451227 100644
--- a/python/packages/core/tests/core/test_clients.py
+++ b/python/packages/core/tests/core/test_clients.py
@@ -7,7 +7,6 @@
BaseChatClient,
ChatClientProtocol,
ChatMessage,
- Role,
)
@@ -16,15 +15,15 @@ def test_chat_client_type(chat_client: ChatClientProtocol):
async def test_chat_client_get_response(chat_client: ChatClientProtocol):
- response = await chat_client.get_response(ChatMessage(role="user", text="Hello"))
+ response = await chat_client.get_response(ChatMessage("user", ["Hello"]))
assert response.text == "test response"
- assert response.messages[0].role == Role.ASSISTANT
+ assert response.messages[0].role == "assistant"
async def test_chat_client_get_streaming_response(chat_client: ChatClientProtocol):
- async for update in chat_client.get_streaming_response(ChatMessage(role="user", text="Hello")):
+ async for update in chat_client.get_streaming_response(ChatMessage("user", ["Hello"])):
assert update.text == "test streaming response " or update.text == "another update"
- assert update.role == Role.ASSISTANT
+ assert update.role == "assistant"
def test_base_client(chat_client_base: ChatClientProtocol):
@@ -33,13 +32,13 @@ def test_base_client(chat_client_base: ChatClientProtocol):
async def test_base_client_get_response(chat_client_base: ChatClientProtocol):
- response = await chat_client_base.get_response(ChatMessage(role="user", text="Hello"))
- assert response.messages[0].role == Role.ASSISTANT
+ response = await chat_client_base.get_response(ChatMessage("user", ["Hello"]))
+ assert response.messages[0].role == "assistant"
assert response.messages[0].text == "test response - Hello"
async def test_base_client_get_streaming_response(chat_client_base: ChatClientProtocol):
- async for update in chat_client_base.get_streaming_response(ChatMessage(role="user", text="Hello")):
+ async for update in chat_client_base.get_streaming_response(ChatMessage("user", ["Hello"])):
assert update.text == "update - Hello" or update.text == "another update"
@@ -54,17 +53,17 @@ async def test_chat_client_instructions_handling(chat_client_base: ChatClientPro
_, kwargs = mock_inner_get_response.call_args
messages = kwargs.get("messages", [])
assert len(messages) == 1
- assert messages[0].role == Role.USER
+ assert messages[0].role == "user"
assert messages[0].text == "hello"
from agent_framework._types import prepend_instructions_to_messages
appended_messages = prepend_instructions_to_messages(
- [ChatMessage(role=Role.USER, text="hello")],
+ [ChatMessage("user", ["hello"])],
instructions,
)
assert len(appended_messages) == 2
- assert appended_messages[0].role == Role.SYSTEM
+ assert appended_messages[0].role == "system"
assert appended_messages[0].text == "You are a helpful assistant."
- assert appended_messages[1].role == Role.USER
+ assert appended_messages[1].role == "user"
assert appended_messages[1].text == "hello"
diff --git a/python/packages/core/tests/core/test_function_invocation_logic.py b/python/packages/core/tests/core/test_function_invocation_logic.py
index 720d5a31d7..8d89c63bb7 100644
--- a/python/packages/core/tests/core/test_function_invocation_logic.py
+++ b/python/packages/core/tests/core/test_function_invocation_logic.py
@@ -13,7 +13,6 @@
ChatResponse,
ChatResponseUpdate,
Content,
- Role,
tool,
)
from agent_framework._middleware import FunctionInvocationContext, FunctionMiddleware
@@ -37,21 +36,21 @@ def ai_func(arg1: str) -> str:
],
)
),
- ChatResponse(messages=ChatMessage(role="assistant", text="done")),
+ ChatResponse(messages=ChatMessage("assistant", ["done"])),
]
response = await chat_client_base.get_response("hello", options={"tool_choice": "auto", "tools": [ai_func]})
assert exec_counter == 1
assert len(response.messages) == 3
- assert response.messages[0].role == Role.ASSISTANT
+ assert response.messages[0].role == "assistant"
assert response.messages[0].contents[0].type == "function_call"
assert response.messages[0].contents[0].name == "test_function"
assert response.messages[0].contents[0].arguments == '{"arg1": "value1"}'
assert response.messages[0].contents[0].call_id == "1"
- assert response.messages[1].role == Role.TOOL
+ assert response.messages[1].role == "tool"
assert response.messages[1].contents[0].type == "function_result"
assert response.messages[1].contents[0].call_id == "1"
assert response.messages[1].contents[0].result == "Processed value1"
- assert response.messages[2].role == Role.ASSISTANT
+ assert response.messages[2].role == "assistant"
assert response.messages[2].text == "done"
@@ -81,16 +80,16 @@ def ai_func(arg1: str) -> str:
],
)
),
- ChatResponse(messages=ChatMessage(role="assistant", text="done")),
+ ChatResponse(messages=ChatMessage("assistant", ["done"])),
]
response = await chat_client_base.get_response("hello", options={"tool_choice": "auto", "tools": [ai_func]})
assert exec_counter == 2
assert len(response.messages) == 5
- assert response.messages[0].role == Role.ASSISTANT
- assert response.messages[1].role == Role.TOOL
- assert response.messages[2].role == Role.ASSISTANT
- assert response.messages[3].role == Role.TOOL
- assert response.messages[4].role == Role.ASSISTANT
+ assert response.messages[0].role == "assistant"
+ assert response.messages[1].role == "tool"
+ assert response.messages[2].role == "assistant"
+ assert response.messages[3].role == "tool"
+ assert response.messages[4].role == "assistant"
assert response.messages[0].contents[0].type == "function_call"
assert response.messages[1].contents[0].type == "function_result"
assert response.messages[2].contents[0].type == "function_call"
@@ -162,7 +161,7 @@ def ai_func(user_query: str) -> str:
],
)
),
- ChatResponse(messages=ChatMessage(role="assistant", text="done")),
+ ChatResponse(messages=ChatMessage("assistant", ["done"])),
]
agent = ChatAgent(chat_client=chat_client_base, tools=[ai_func])
@@ -219,7 +218,7 @@ def ai_func(user_query: str) -> str:
],
)
),
- ChatResponse(messages=ChatMessage(role="assistant", text="done")),
+ ChatResponse(messages=ChatMessage("assistant", ["done"])),
]
agent = ChatAgent(chat_client=chat_client_base, tools=[ai_func])
@@ -339,11 +338,11 @@ def func_with_approval(arg1: str) -> str:
# Single function call content
func_call = Content.from_function_call(call_id="1", name=function_name, arguments='{"arg1": "value1"}')
- completion = ChatMessage(role="assistant", text="done")
+ completion = ChatMessage("assistant", ["done"])
- chat_client_base.run_responses = [
- ChatResponse(messages=ChatMessage(role="assistant", contents=[func_call]))
- ] + ([] if approval_required else [ChatResponse(messages=completion)])
+ chat_client_base.run_responses = [ChatResponse(messages=ChatMessage("assistant", [func_call]))] + (
+ [] if approval_required else [ChatResponse(messages=completion)]
+ )
chat_client_base.streaming_responses = [
[
@@ -371,7 +370,7 @@ def func_with_approval(arg1: str) -> str:
Content.from_function_call(call_id="2", name="approval_func", arguments='{"arg1": "value2"}'),
]
- chat_client_base.run_responses = [ChatResponse(messages=ChatMessage(role="assistant", contents=func_calls))]
+ chat_client_base.run_responses = [ChatResponse(messages=ChatMessage("assistant", func_calls))]
chat_client_base.streaming_responses = [
[
@@ -432,7 +431,7 @@ def func_with_approval(arg1: str) -> str:
assert messages[0].contents[0].type == "function_call"
assert messages[1].contents[0].type == "function_result"
assert messages[1].contents[0].result == "Processed value1"
- assert messages[2].role == Role.ASSISTANT
+ assert messages[2].role == "assistant"
assert messages[2].text == "done"
assert exec_counter == 1
else:
@@ -497,7 +496,7 @@ def func_rejected(arg1: str) -> str:
],
)
),
- ChatResponse(messages=ChatMessage(role="assistant", text="done")),
+ ChatResponse(messages=ChatMessage("assistant", ["done"])),
]
# Get the response with approval requests
@@ -527,7 +526,7 @@ def func_rejected(arg1: str) -> str:
)
# Continue conversation with one approved and one rejected
- all_messages = response.messages + [ChatMessage(role="user", contents=[approved_response, rejected_response])]
+ all_messages = response.messages + [ChatMessage("user", [approved_response, rejected_response])]
# Call get_response which will process the approvals
await chat_client_base.get_response(
@@ -561,9 +560,7 @@ def func_rejected(arg1: str) -> str:
for msg in all_messages:
for content in msg.contents:
if content.type == "function_result":
- assert msg.role == Role.TOOL, (
- f"Message with FunctionResultContent must have role='tool', got '{msg.role}'"
- )
+ assert msg.role == "tool", f"Message with FunctionResultContent must have role='tool', got '{msg.role}'"
async def test_approval_requests_in_assistant_message(chat_client_base: ChatClientProtocol):
@@ -593,7 +590,7 @@ def func_with_approval(arg1: str) -> str:
# Should have one assistant message containing both the call and approval request
assert len(response.messages) == 1
- assert response.messages[0].role == Role.ASSISTANT
+ assert response.messages[0].role == "assistant"
assert len(response.messages[0].contents) == 2
assert response.messages[0].contents[0].type == "function_call"
assert response.messages[0].contents[1].type == "function_approval_request"
@@ -620,7 +617,7 @@ def func_with_approval(arg1: str) -> str:
],
)
),
- ChatResponse(messages=ChatMessage(role="assistant", text="done")),
+ ChatResponse(messages=ChatMessage("assistant", ["done"])),
]
# Get approval request
@@ -630,7 +627,7 @@ def func_with_approval(arg1: str) -> str:
# Store messages (like a thread would)
persisted_messages = [
- ChatMessage(role="user", contents=[Content.from_text(text="hello")]),
+ ChatMessage("user", [Content.from_text(text="hello")]),
*response1.messages,
]
@@ -641,7 +638,7 @@ def func_with_approval(arg1: str) -> str:
function_call=approval_req.function_call,
approved=True,
)
- persisted_messages.append(ChatMessage(role="user", contents=[approval_response]))
+ persisted_messages.append(ChatMessage("user", [approval_response]))
# Continue with all persisted messages
response2 = await chat_client_base.get_response(
@@ -670,7 +667,7 @@ def func_with_approval(arg1: str) -> str:
],
)
),
- ChatResponse(messages=ChatMessage(role="assistant", text="done")),
+ ChatResponse(messages=ChatMessage("assistant", ["done"])),
]
response1 = await chat_client_base.get_response(
@@ -684,7 +681,7 @@ def func_with_approval(arg1: str) -> str:
approved=True,
)
- all_messages = response1.messages + [ChatMessage(role="user", contents=[approval_response])]
+ all_messages = response1.messages + [ChatMessage("user", [approval_response])]
await chat_client_base.get_response(all_messages, options={"tool_choice": "auto", "tools": [func_with_approval]})
# Count function calls with the same call_id
@@ -714,7 +711,7 @@ def func_with_approval(arg1: str) -> str:
],
)
),
- ChatResponse(messages=ChatMessage(role="assistant", text="done")),
+ ChatResponse(messages=ChatMessage("assistant", ["done"])),
]
response1 = await chat_client_base.get_response(
@@ -728,7 +725,7 @@ def func_with_approval(arg1: str) -> str:
approved=False,
)
- all_messages = response1.messages + [ChatMessage(role="user", contents=[rejection_response])]
+ all_messages = response1.messages + [ChatMessage("user", [rejection_response])]
await chat_client_base.get_response(all_messages, options={"tool_choice": "auto", "tools": [func_with_approval]})
# Find the rejection result
@@ -771,7 +768,7 @@ def ai_func(arg1: str) -> str:
)
),
# Failsafe response when tool_choice is set to "none"
- ChatResponse(messages=ChatMessage(role="assistant", text="giving up on tools")),
+ ChatResponse(messages=ChatMessage("assistant", ["giving up on tools"])),
]
# Set max_iterations to 1 in additional_properties
@@ -798,7 +795,7 @@ def ai_func(arg1: str) -> str:
return f"Processed {arg1}"
chat_client_base.run_responses = [
- ChatResponse(messages=ChatMessage(role="assistant", text="response without function calling")),
+ ChatResponse(messages=ChatMessage("assistant", ["response without function calling"])),
]
# Disable function invocation
@@ -853,7 +850,7 @@ def error_func(arg1: str) -> str:
],
)
),
- ChatResponse(messages=ChatMessage(role="assistant", text="final response")),
+ ChatResponse(messages=ChatMessage("assistant", ["final response"])),
]
# Set max_consecutive_errors to 2
@@ -898,7 +895,7 @@ def known_func(arg1: str) -> str:
],
)
),
- ChatResponse(messages=ChatMessage(role="assistant", text="done")),
+ ChatResponse(messages=ChatMessage("assistant", ["done"])),
]
# Set terminate_on_unknown_calls to False (default)
@@ -971,7 +968,7 @@ def hidden_func(arg1: str) -> str:
],
)
),
- ChatResponse(messages=ChatMessage(role="assistant", text="done")),
+ ChatResponse(messages=ChatMessage("assistant", ["done"])),
]
# Add hidden_func to additional_tools
@@ -1010,7 +1007,7 @@ def error_func(arg1: str) -> str:
],
)
),
- ChatResponse(messages=ChatMessage(role="assistant", text="done")),
+ ChatResponse(messages=ChatMessage("assistant", ["done"])),
]
# Set include_detailed_errors to False (default)
@@ -1044,7 +1041,7 @@ def error_func(arg1: str) -> str:
],
)
),
- ChatResponse(messages=ChatMessage(role="assistant", text="done")),
+ ChatResponse(messages=ChatMessage("assistant", ["done"])),
]
# Set include_detailed_errors to True
@@ -1114,7 +1111,7 @@ def typed_func(arg1: int) -> str: # Expects int, not str
],
)
),
- ChatResponse(messages=ChatMessage(role="assistant", text="done")),
+ ChatResponse(messages=ChatMessage("assistant", ["done"])),
]
# Set include_detailed_errors to True
@@ -1148,7 +1145,7 @@ def typed_func(arg1: int) -> str: # Expects int, not str
],
)
),
- ChatResponse(messages=ChatMessage(role="assistant", text="done")),
+ ChatResponse(messages=ChatMessage("assistant", ["done"])),
]
# Set include_detailed_errors to False (default)
@@ -1184,12 +1181,12 @@ def local_func(arg1: str) -> str:
)
chat_client_base.run_responses = [
- ChatResponse(messages=ChatMessage(role="assistant", text="done")),
+ ChatResponse(messages=ChatMessage("assistant", ["done"])),
]
# Send the approval response
response = await chat_client_base.get_response(
- [ChatMessage(role="user", contents=[approval_response])],
+ [ChatMessage("user", [approval_response])],
tool_choice="auto",
tools=[local_func],
)
@@ -1215,7 +1212,7 @@ def test_func(arg1: str) -> str:
],
)
),
- ChatResponse(messages=ChatMessage(role="assistant", text="done")),
+ ChatResponse(messages=ChatMessage("assistant", ["done"])),
]
# Get approval request
@@ -1231,7 +1228,7 @@ def test_func(arg1: str) -> str:
)
# Continue conversation with rejection
- all_messages = response1.messages + [ChatMessage(role="user", contents=[rejection_response])]
+ all_messages = response1.messages + [ChatMessage("user", [rejection_response])]
# This should handle the rejection gracefully (not raise ToolException to user)
await chat_client_base.get_response(all_messages, options={"tool_choice": "auto", "tools": [test_func]})
@@ -1270,7 +1267,7 @@ def error_func(arg1: str) -> str:
contents=[Content.from_function_call(call_id="1", name="error_func", arguments='{"arg1": "value1"}')],
)
),
- ChatResponse(messages=ChatMessage(role="assistant", text="done")),
+ ChatResponse(messages=ChatMessage("assistant", ["done"])),
]
# Set include_detailed_errors to False (default)
@@ -1288,7 +1285,7 @@ def error_func(arg1: str) -> str:
approved=True,
)
- all_messages = response1.messages + [ChatMessage(role="user", contents=[approval_response])]
+ all_messages = response1.messages + [ChatMessage("user", [approval_response])]
# Execute the approved function (which will error)
await chat_client_base.get_response(all_messages, options={"tool_choice": "auto", "tools": [error_func]})
@@ -1333,7 +1330,7 @@ def error_func(arg1: str) -> str:
contents=[Content.from_function_call(call_id="1", name="error_func", arguments='{"arg1": "value1"}')],
)
),
- ChatResponse(messages=ChatMessage(role="assistant", text="done")),
+ ChatResponse(messages=ChatMessage("assistant", ["done"])),
]
# Set include_detailed_errors to True
@@ -1351,7 +1348,7 @@ def error_func(arg1: str) -> str:
approved=True,
)
- all_messages = response1.messages + [ChatMessage(role="user", contents=[approval_response])]
+ all_messages = response1.messages + [ChatMessage("user", [approval_response])]
# Execute the approved function (which will error)
await chat_client_base.get_response(all_messages, options={"tool_choice": "auto", "tools": [error_func]})
@@ -1396,7 +1393,7 @@ def typed_func(arg1: int) -> str: # Expects int, not str
],
)
),
- ChatResponse(messages=ChatMessage(role="assistant", text="done")),
+ ChatResponse(messages=ChatMessage("assistant", ["done"])),
]
# Set include_detailed_errors to True to see validation details
@@ -1414,7 +1411,7 @@ def typed_func(arg1: int) -> str: # Expects int, not str
approved=True,
)
- all_messages = response1.messages + [ChatMessage(role="user", contents=[approval_response])]
+ all_messages = response1.messages + [ChatMessage("user", [approval_response])]
# Execute the approved function (which will fail validation)
await chat_client_base.get_response(all_messages, options={"tool_choice": "auto", "tools": [typed_func]})
@@ -1455,7 +1452,7 @@ def success_func(arg1: str) -> str:
contents=[Content.from_function_call(call_id="1", name="success_func", arguments='{"arg1": "value1"}')],
)
),
- ChatResponse(messages=ChatMessage(role="assistant", text="done")),
+ ChatResponse(messages=ChatMessage("assistant", ["done"])),
]
# Get approval request
@@ -1470,7 +1467,7 @@ def success_func(arg1: str) -> str:
approved=True,
)
- all_messages = response1.messages + [ChatMessage(role="user", contents=[approval_response])]
+ all_messages = response1.messages + [ChatMessage("user", [approval_response])]
# Execute the approved function
await chat_client_base.get_response(all_messages, options={"tool_choice": "auto", "tools": [success_func]})
@@ -1516,7 +1513,7 @@ async def test_declaration_only_tool(chat_client_base: ChatClientProtocol):
],
)
),
- ChatResponse(messages=ChatMessage(role="assistant", text="done")),
+ ChatResponse(messages=ChatMessage("assistant", ["done"])),
]
response = await chat_client_base.get_response(
@@ -1572,7 +1569,7 @@ async def func2(arg1: str) -> str:
],
)
),
- ChatResponse(messages=ChatMessage(role="assistant", text="done")),
+ ChatResponse(messages=ChatMessage("assistant", ["done"])),
]
response = await chat_client_base.get_response("hello", options={"tool_choice": "auto", "tools": [func1, func2]})
@@ -1608,7 +1605,7 @@ def plain_function(arg1: str) -> str:
],
)
),
- ChatResponse(messages=ChatMessage(role="assistant", text="done")),
+ ChatResponse(messages=ChatMessage("assistant", ["done"])),
]
# Pass plain function (will be auto-converted)
@@ -1639,7 +1636,7 @@ def test_func(arg1: str) -> str:
conversation_id="conv_123", # Simulate service-side thread
),
ChatResponse(
- messages=ChatMessage(role="assistant", text="done"),
+ messages=ChatMessage("assistant", ["done"]),
conversation_id="conv_123",
),
]
@@ -1668,7 +1665,7 @@ def test_func(arg1: str) -> str:
],
)
),
- ChatResponse(messages=ChatMessage(role="assistant", text="done")),
+ ChatResponse(messages=ChatMessage("assistant", ["done"])),
]
response = await chat_client_base.get_response("hello", options={"tool_choice": "auto", "tools": [test_func]})
@@ -1712,7 +1709,7 @@ def sometimes_fails(arg1: str) -> str:
],
)
),
- ChatResponse(messages=ChatMessage(role="assistant", text="done")),
+ ChatResponse(messages=ChatMessage("assistant", ["done"])),
]
response = await chat_client_base.get_response("hello", options={"tool_choice": "auto", "tools": [sometimes_fails]})
@@ -2324,7 +2321,7 @@ def ai_func(arg1: str) -> str:
],
)
),
- ChatResponse(messages=ChatMessage(role="assistant", text="done")),
+ ChatResponse(messages=ChatMessage("assistant", ["done"])),
]
response = await chat_client_base.get_response(
@@ -2339,9 +2336,9 @@ def ai_func(arg1: str) -> str:
# There should be 2 messages: assistant with function call, tool result from middleware
# The loop should NOT have continued to call the LLM again
assert len(response.messages) == 2
- assert response.messages[0].role == Role.ASSISTANT
+ assert response.messages[0].role == "assistant"
assert response.messages[0].contents[0].type == "function_call"
- assert response.messages[1].role == Role.TOOL
+ assert response.messages[1].role == "tool"
assert response.messages[1].contents[0].type == "function_result"
assert response.messages[1].contents[0].result == "terminated by middleware"
@@ -2393,7 +2390,7 @@ def terminating_func(arg1: str) -> str:
],
)
),
- ChatResponse(messages=ChatMessage(role="assistant", text="done")),
+ ChatResponse(messages=ChatMessage("assistant", ["done"])),
]
response = await chat_client_base.get_response(
@@ -2410,9 +2407,9 @@ def terminating_func(arg1: str) -> str:
# There should be 2 messages: assistant with function calls, tool results
# The loop should NOT have continued to call the LLM again
assert len(response.messages) == 2
- assert response.messages[0].role == Role.ASSISTANT
+ assert response.messages[0].role == "assistant"
assert len(response.messages[0].contents) == 2
- assert response.messages[1].role == Role.TOOL
+ assert response.messages[1].role == "tool"
# Both function results should be present
assert len(response.messages[1].contents) == 2
diff --git a/python/packages/core/tests/core/test_kwargs_propagation_to_ai_function.py b/python/packages/core/tests/core/test_kwargs_propagation_to_ai_function.py
index 34798a4a16..18e60c383c 100644
--- a/python/packages/core/tests/core/test_kwargs_propagation_to_ai_function.py
+++ b/python/packages/core/tests/core/test_kwargs_propagation_to_ai_function.py
@@ -49,7 +49,7 @@ async def mock_get_response(self, messages, **kwargs):
]
)
# Second call: return final response
- return ChatResponse(messages=[ChatMessage(role="assistant", text="Done!")])
+ return ChatResponse(messages=[ChatMessage("assistant", ["Done!"])])
# Wrap the function with function invocation decorator
wrapped = _handle_function_calls_response(mock_get_response)
@@ -101,7 +101,7 @@ async def mock_get_response(self, messages, **kwargs):
)
]
)
- return ChatResponse(messages=[ChatMessage(role="assistant", text="Completed!")])
+ return ChatResponse(messages=[ChatMessage("assistant", ["Completed!"])])
wrapped = _handle_function_calls_response(mock_get_response)
@@ -149,7 +149,7 @@ async def mock_get_response(self, messages, **kwargs):
)
]
)
- return ChatResponse(messages=[ChatMessage(role="assistant", text="All done!")])
+ return ChatResponse(messages=[ChatMessage("assistant", ["All done!"])])
wrapped = _handle_function_calls_response(mock_get_response)
@@ -196,13 +196,10 @@ async def mock_get_streaming_response(self, messages, **kwargs):
arguments='{"value": "streaming-test"}',
)
],
- is_finished=True,
)
else:
# Second call: return final response
- yield ChatResponseUpdate(
- text=Content.from_text(text="Stream complete!"), role="assistant", is_finished=True
- )
+ yield ChatResponseUpdate(contents=[Content.from_text(text="Stream complete!")], role="assistant")
wrapped = _handle_function_calls_streaming_response(mock_get_streaming_response)
diff --git a/python/packages/core/tests/core/test_mcp.py b/python/packages/core/tests/core/test_mcp.py
index f6d2b535d8..7695affb5a 100644
--- a/python/packages/core/tests/core/test_mcp.py
+++ b/python/packages/core/tests/core/test_mcp.py
@@ -18,7 +18,6 @@
MCPStdioTool,
MCPStreamableHTTPTool,
MCPWebsocketTool,
- Role,
ToolProtocol,
)
from agent_framework._mcp import (
@@ -63,7 +62,7 @@ def test_mcp_prompt_message_to_ai_content():
ai_content = _parse_message_from_mcp(mcp_message)
assert isinstance(ai_content, ChatMessage)
- assert ai_content.role.value == "user"
+ assert ai_content.role == "user"
assert len(ai_content.contents) == 1
assert ai_content.contents[0].type == "text"
assert ai_content.contents[0].text == "Hello, world!"
@@ -1056,7 +1055,7 @@ def get_mcp_client(self) -> _AsyncGeneratorContextManager[Any, None]:
assert len(result) == 1
assert isinstance(result[0], ChatMessage)
- assert result[0].role == Role.USER
+ assert result[0].role == "user"
assert len(result[0].contents) == 1
assert result[0].contents[0].text == "Test message"
@@ -1414,7 +1413,7 @@ async def test_mcp_tool_sampling_callback_chat_client_exception():
async def test_mcp_tool_sampling_callback_no_valid_content():
"""Test sampling callback when response has no valid content types."""
- from agent_framework import ChatMessage, Role
+ from agent_framework import ChatMessage
tool = MCPStdioTool(name="test_tool", command="python")
@@ -1423,7 +1422,7 @@ async def test_mcp_tool_sampling_callback_no_valid_content():
mock_response = Mock()
mock_response.messages = [
ChatMessage(
- role=Role.ASSISTANT,
+ role="assistant",
contents=[
Content.from_uri(
uri="data:application/json;base64,e30K",
diff --git a/python/packages/core/tests/core/test_memory.py b/python/packages/core/tests/core/test_memory.py
index bcc299ed37..78b48afd87 100644
--- a/python/packages/core/tests/core/test_memory.py
+++ b/python/packages/core/tests/core/test_memory.py
@@ -4,7 +4,7 @@
from collections.abc import MutableSequence
from typing import Any
-from agent_framework import ChatMessage, Role
+from agent_framework import ChatMessage
from agent_framework._memory import Context, ContextProvider
@@ -69,7 +69,7 @@ def test_context_default_values(self) -> None:
def test_context_with_values(self) -> None:
"""Test Context can be initialized with values."""
- messages = [ChatMessage(role=Role.USER, text="Test message")]
+ messages = [ChatMessage("user", ["Test message"])]
context = Context(instructions="Test instructions", messages=messages)
assert context.instructions == "Test instructions"
assert len(context.messages) == 1
@@ -89,15 +89,15 @@ async def test_thread_created(self) -> None:
async def test_invoked(self) -> None:
"""Test invoked is called."""
provider = MockContextProvider()
- message = ChatMessage(role=Role.USER, text="Test message")
+ message = ChatMessage("user", ["Test message"])
await provider.invoked(message)
assert provider.invoked_called
assert provider.new_messages == message
async def test_invoking(self) -> None:
"""Test invoking is called and returns context."""
- provider = MockContextProvider(messages=[ChatMessage(role=Role.USER, text="Context message")])
- message = ChatMessage(role=Role.USER, text="Test message")
+ provider = MockContextProvider(messages=[ChatMessage("user", ["Context message"])])
+ message = ChatMessage("user", ["Test message"])
context = await provider.invoking(message)
assert provider.invoking_called
assert provider.model_invoking_messages == message
@@ -114,7 +114,7 @@ async def test_base_thread_created_does_nothing(self) -> None:
async def test_base_invoked_does_nothing(self) -> None:
"""Test that base ContextProvider.invoked does nothing by default."""
provider = MinimalContextProvider()
- message = ChatMessage(role=Role.USER, text="Test")
+ message = ChatMessage("user", ["Test"])
await provider.invoked(message)
await provider.invoked(message, response_messages=message)
await provider.invoked(message, invoke_exception=Exception("test"))
diff --git a/python/packages/core/tests/core/test_middleware.py b/python/packages/core/tests/core/test_middleware.py
index a62cca2c76..b0536ac94c 100644
--- a/python/packages/core/tests/core/test_middleware.py
+++ b/python/packages/core/tests/core/test_middleware.py
@@ -15,7 +15,6 @@
ChatResponse,
ChatResponseUpdate,
Content,
- Role,
)
from agent_framework._middleware import (
AgentMiddleware,
@@ -36,7 +35,7 @@ class TestAgentRunContext:
def test_init_with_defaults(self, mock_agent: AgentProtocol) -> None:
"""Test AgentRunContext initialization with default values."""
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
context = AgentRunContext(agent=mock_agent, messages=messages)
assert context.agent is mock_agent
@@ -46,7 +45,7 @@ def test_init_with_defaults(self, mock_agent: AgentProtocol) -> None:
def test_init_with_custom_values(self, mock_agent: AgentProtocol) -> None:
"""Test AgentRunContext initialization with custom values."""
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
metadata = {"key": "value"}
context = AgentRunContext(agent=mock_agent, messages=messages, is_streaming=True, metadata=metadata)
@@ -59,7 +58,7 @@ def test_init_with_thread(self, mock_agent: AgentProtocol) -> None:
"""Test AgentRunContext initialization with thread parameter."""
from agent_framework import AgentThread
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
thread = AgentThread()
context = AgentRunContext(agent=mock_agent, messages=messages, thread=thread)
@@ -98,7 +97,7 @@ class TestChatContext:
def test_init_with_defaults(self, mock_chat_client: Any) -> None:
"""Test ChatContext initialization with default values."""
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
chat_options: dict[str, Any] = {}
context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options)
@@ -112,7 +111,7 @@ def test_init_with_defaults(self, mock_chat_client: Any) -> None:
def test_init_with_custom_values(self, mock_chat_client: Any) -> None:
"""Test ChatContext initialization with custom values."""
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
chat_options: dict[str, Any] = {"temperature": 0.5}
metadata = {"key": "value"}
@@ -169,10 +168,10 @@ async def test_middleware(context: AgentRunContext, next: Callable[[AgentRunCont
async def test_execute_no_middleware(self, mock_agent: AgentProtocol) -> None:
"""Test pipeline execution with no middleware."""
pipeline = AgentMiddlewarePipeline()
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
context = AgentRunContext(agent=mock_agent, messages=messages)
- expected_response = AgentResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="response")])
+ expected_response = AgentResponse(messages=[ChatMessage("assistant", ["response"])])
async def final_handler(ctx: AgentRunContext) -> AgentResponse:
return expected_response
@@ -197,10 +196,10 @@ async def process(
middleware = OrderTrackingMiddleware("test")
pipeline = AgentMiddlewarePipeline([middleware])
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
context = AgentRunContext(agent=mock_agent, messages=messages)
- expected_response = AgentResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="response")])
+ expected_response = AgentResponse(messages=[ChatMessage("assistant", ["response"])])
async def final_handler(ctx: AgentRunContext) -> AgentResponse:
execution_order.append("handler")
@@ -213,7 +212,7 @@ async def final_handler(ctx: AgentRunContext) -> AgentResponse:
async def test_execute_stream_no_middleware(self, mock_agent: AgentProtocol) -> None:
"""Test pipeline streaming execution with no middleware."""
pipeline = AgentMiddlewarePipeline()
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
context = AgentRunContext(agent=mock_agent, messages=messages)
async def final_handler(ctx: AgentRunContext) -> AsyncIterable[AgentResponseUpdate]:
@@ -245,7 +244,7 @@ async def process(
middleware = StreamOrderTrackingMiddleware("test")
pipeline = AgentMiddlewarePipeline([middleware])
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
context = AgentRunContext(agent=mock_agent, messages=messages)
async def final_handler(ctx: AgentRunContext) -> AsyncIterable[AgentResponseUpdate]:
@@ -267,14 +266,14 @@ async def test_execute_with_pre_next_termination(self, mock_agent: AgentProtocol
"""Test pipeline execution with termination before next()."""
middleware = self.PreNextTerminateMiddleware()
pipeline = AgentMiddlewarePipeline([middleware])
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
context = AgentRunContext(agent=mock_agent, messages=messages)
execution_order: list[str] = []
async def final_handler(ctx: AgentRunContext) -> AgentResponse:
# Handler should not be executed when terminated before next()
execution_order.append("handler")
- return AgentResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="response")])
+ return AgentResponse(messages=[ChatMessage("assistant", ["response"])])
response = await pipeline.execute(mock_agent, messages, context, final_handler)
assert response is not None
@@ -287,13 +286,13 @@ async def test_execute_with_post_next_termination(self, mock_agent: AgentProtoco
"""Test pipeline execution with termination after next()."""
middleware = self.PostNextTerminateMiddleware()
pipeline = AgentMiddlewarePipeline([middleware])
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
context = AgentRunContext(agent=mock_agent, messages=messages)
execution_order: list[str] = []
async def final_handler(ctx: AgentRunContext) -> AgentResponse:
execution_order.append("handler")
- return AgentResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="response")])
+ return AgentResponse(messages=[ChatMessage("assistant", ["response"])])
response = await pipeline.execute(mock_agent, messages, context, final_handler)
assert response is not None
@@ -306,7 +305,7 @@ async def test_execute_stream_with_pre_next_termination(self, mock_agent: AgentP
"""Test pipeline streaming execution with termination before next()."""
middleware = self.PreNextTerminateMiddleware()
pipeline = AgentMiddlewarePipeline([middleware])
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
context = AgentRunContext(agent=mock_agent, messages=messages)
execution_order: list[str] = []
@@ -330,7 +329,7 @@ async def test_execute_stream_with_post_next_termination(self, mock_agent: Agent
"""Test pipeline streaming execution with termination after next()."""
middleware = self.PostNextTerminateMiddleware()
pipeline = AgentMiddlewarePipeline([middleware])
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
context = AgentRunContext(agent=mock_agent, messages=messages)
execution_order: list[str] = []
@@ -366,11 +365,11 @@ async def process(
middleware = ThreadCapturingMiddleware()
pipeline = AgentMiddlewarePipeline([middleware])
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
thread = AgentThread()
context = AgentRunContext(agent=mock_agent, messages=messages, thread=thread)
- expected_response = AgentResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="response")])
+ expected_response = AgentResponse(messages=[ChatMessage("assistant", ["response"])])
async def final_handler(ctx: AgentRunContext) -> AgentResponse:
return expected_response
@@ -393,10 +392,10 @@ async def process(
middleware = ThreadCapturingMiddleware()
pipeline = AgentMiddlewarePipeline([middleware])
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
context = AgentRunContext(agent=mock_agent, messages=messages, thread=None)
- expected_response = AgentResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="response")])
+ expected_response = AgentResponse(messages=[ChatMessage("assistant", ["response"])])
async def final_handler(ctx: AgentRunContext) -> AgentResponse:
return expected_response
@@ -560,11 +559,11 @@ async def test_middleware(context: ChatContext, next: Callable[[ChatContext], Aw
async def test_execute_no_middleware(self, mock_chat_client: Any) -> None:
"""Test pipeline execution with no middleware."""
pipeline = ChatMiddlewarePipeline()
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
chat_options: dict[str, Any] = {}
context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options)
- expected_response = ChatResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="response")])
+ expected_response = ChatResponse(messages=[ChatMessage("assistant", ["response"])])
async def final_handler(ctx: ChatContext) -> ChatResponse:
return expected_response
@@ -587,11 +586,11 @@ async def process(self, context: ChatContext, next: Callable[[ChatContext], Awai
middleware = OrderTrackingChatMiddleware("test")
pipeline = ChatMiddlewarePipeline([middleware])
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
chat_options: dict[str, Any] = {}
context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options)
- expected_response = ChatResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="response")])
+ expected_response = ChatResponse(messages=[ChatMessage("assistant", ["response"])])
async def final_handler(ctx: ChatContext) -> ChatResponse:
execution_order.append("handler")
@@ -604,7 +603,7 @@ async def final_handler(ctx: ChatContext) -> ChatResponse:
async def test_execute_stream_no_middleware(self, mock_chat_client: Any) -> None:
"""Test pipeline streaming execution with no middleware."""
pipeline = ChatMiddlewarePipeline()
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
chat_options: dict[str, Any] = {}
context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options)
@@ -635,7 +634,7 @@ async def process(self, context: ChatContext, next: Callable[[ChatContext], Awai
middleware = StreamOrderTrackingChatMiddleware("test")
pipeline = ChatMiddlewarePipeline([middleware])
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
chat_options: dict[str, Any] = {}
context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options, is_streaming=True)
@@ -658,7 +657,7 @@ async def test_execute_with_pre_next_termination(self, mock_chat_client: Any) ->
"""Test pipeline execution with termination before next()."""
middleware = self.PreNextTerminateChatMiddleware()
pipeline = ChatMiddlewarePipeline([middleware])
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
chat_options: dict[str, Any] = {}
context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options)
execution_order: list[str] = []
@@ -666,7 +665,7 @@ async def test_execute_with_pre_next_termination(self, mock_chat_client: Any) ->
async def final_handler(ctx: ChatContext) -> ChatResponse:
# Handler should not be executed when terminated before next()
execution_order.append("handler")
- return ChatResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="response")])
+ return ChatResponse(messages=[ChatMessage("assistant", ["response"])])
response = await pipeline.execute(mock_chat_client, messages, chat_options, context, final_handler)
assert response is None
@@ -678,14 +677,14 @@ async def test_execute_with_post_next_termination(self, mock_chat_client: Any) -
"""Test pipeline execution with termination after next()."""
middleware = self.PostNextTerminateChatMiddleware()
pipeline = ChatMiddlewarePipeline([middleware])
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
chat_options: dict[str, Any] = {}
context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options)
execution_order: list[str] = []
async def final_handler(ctx: ChatContext) -> ChatResponse:
execution_order.append("handler")
- return ChatResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="response")])
+ return ChatResponse(messages=[ChatMessage("assistant", ["response"])])
response = await pipeline.execute(mock_chat_client, messages, chat_options, context, final_handler)
assert response is not None
@@ -698,7 +697,7 @@ async def test_execute_stream_with_pre_next_termination(self, mock_chat_client:
"""Test pipeline streaming execution with termination before next()."""
middleware = self.PreNextTerminateChatMiddleware()
pipeline = ChatMiddlewarePipeline([middleware])
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
chat_options: dict[str, Any] = {}
context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options, is_streaming=True)
execution_order: list[str] = []
@@ -723,7 +722,7 @@ async def test_execute_stream_with_post_next_termination(self, mock_chat_client:
"""Test pipeline streaming execution with termination after next()."""
middleware = self.PostNextTerminateChatMiddleware()
pipeline = ChatMiddlewarePipeline([middleware])
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
chat_options: dict[str, Any] = {}
context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options, is_streaming=True)
execution_order: list[str] = []
@@ -764,12 +763,12 @@ async def process(
middleware = MetadataAgentMiddleware()
pipeline = AgentMiddlewarePipeline([middleware])
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
context = AgentRunContext(agent=mock_agent, messages=messages)
async def final_handler(ctx: AgentRunContext) -> AgentResponse:
metadata_updates.append("handler")
- return AgentResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="response")])
+ return AgentResponse(messages=[ChatMessage("assistant", ["response"])])
result = await pipeline.execute(mock_agent, messages, context, final_handler)
@@ -827,12 +826,12 @@ async def test_agent_middleware(
execution_order.append("function_after")
pipeline = AgentMiddlewarePipeline([test_agent_middleware])
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
context = AgentRunContext(agent=mock_agent, messages=messages)
async def final_handler(ctx: AgentRunContext) -> AgentResponse:
execution_order.append("handler")
- return AgentResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="response")])
+ return AgentResponse(messages=[ChatMessage("assistant", ["response"])])
result = await pipeline.execute(mock_agent, messages, context, final_handler)
@@ -890,12 +889,12 @@ async def function_middleware(
execution_order.append("function_after")
pipeline = AgentMiddlewarePipeline([ClassMiddleware(), function_middleware])
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
context = AgentRunContext(agent=mock_agent, messages=messages)
async def final_handler(ctx: AgentRunContext) -> AgentResponse:
execution_order.append("handler")
- return AgentResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="response")])
+ return AgentResponse(messages=[ChatMessage("assistant", ["response"])])
result = await pipeline.execute(mock_agent, messages, context, final_handler)
@@ -954,13 +953,13 @@ async def function_chat_middleware(
execution_order.append("function_after")
pipeline = ChatMiddlewarePipeline([ClassChatMiddleware(), function_chat_middleware])
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
chat_options: dict[str, Any] = {}
context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options)
async def final_handler(ctx: ChatContext) -> ChatResponse:
execution_order.append("handler")
- return ChatResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="response")])
+ return ChatResponse(messages=[ChatMessage("assistant", ["response"])])
result = await pipeline.execute(mock_chat_client, messages, chat_options, context, final_handler)
@@ -1001,12 +1000,12 @@ async def process(
middleware = [FirstMiddleware(), SecondMiddleware(), ThirdMiddleware()]
pipeline = AgentMiddlewarePipeline(middleware) # type: ignore
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
context = AgentRunContext(agent=mock_agent, messages=messages)
async def final_handler(ctx: AgentRunContext) -> AgentResponse:
execution_order.append("handler")
- return AgentResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="response")])
+ return AgentResponse(messages=[ChatMessage("assistant", ["response"])])
result = await pipeline.execute(mock_agent, messages, context, final_handler)
@@ -1085,13 +1084,13 @@ async def process(self, context: ChatContext, next: Callable[[ChatContext], Awai
middleware = [FirstChatMiddleware(), SecondChatMiddleware(), ThirdChatMiddleware()]
pipeline = ChatMiddlewarePipeline(middleware) # type: ignore
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
chat_options: dict[str, Any] = {}
context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options)
async def final_handler(ctx: ChatContext) -> ChatResponse:
execution_order.append("handler")
- return ChatResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="response")])
+ return ChatResponse(messages=[ChatMessage("assistant", ["response"])])
result = await pipeline.execute(mock_chat_client, messages, chat_options, context, final_handler)
@@ -1127,7 +1126,7 @@ async def process(
# Verify context content
assert context.agent is mock_agent
assert len(context.messages) == 1
- assert context.messages[0].role == Role.USER
+ assert context.messages[0].role == "user"
assert context.messages[0].text == "test"
assert context.is_streaming is False
assert isinstance(context.metadata, dict)
@@ -1139,13 +1138,13 @@ async def process(
middleware = ContextValidationMiddleware()
pipeline = AgentMiddlewarePipeline([middleware])
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
context = AgentRunContext(agent=mock_agent, messages=messages)
async def final_handler(ctx: AgentRunContext) -> AgentResponse:
# Verify metadata was set by middleware
assert ctx.metadata.get("validated") is True
- return AgentResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="response")])
+ return AgentResponse(messages=[ChatMessage("assistant", ["response"])])
result = await pipeline.execute(mock_agent, messages, context, final_handler)
assert result is not None
@@ -1205,7 +1204,7 @@ async def process(self, context: ChatContext, next: Callable[[ChatContext], Awai
# Verify context content
assert context.chat_client is mock_chat_client
assert len(context.messages) == 1
- assert context.messages[0].role == Role.USER
+ assert context.messages[0].role == "user"
assert context.messages[0].text == "test"
assert context.is_streaming is False
assert isinstance(context.metadata, dict)
@@ -1219,14 +1218,14 @@ async def process(self, context: ChatContext, next: Callable[[ChatContext], Awai
middleware = ChatContextValidationMiddleware()
pipeline = ChatMiddlewarePipeline([middleware])
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
chat_options: dict[str, Any] = {"temperature": 0.5}
context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options)
async def final_handler(ctx: ChatContext) -> ChatResponse:
# Verify metadata was set by middleware
assert ctx.metadata.get("validated") is True
- return ChatResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="response")])
+ return ChatResponse(messages=[ChatMessage("assistant", ["response"])])
result = await pipeline.execute(mock_chat_client, messages, chat_options, context, final_handler)
assert result is not None
@@ -1248,14 +1247,14 @@ async def process(
middleware = StreamingFlagMiddleware()
pipeline = AgentMiddlewarePipeline([middleware])
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
# Test non-streaming
context = AgentRunContext(agent=mock_agent, messages=messages)
async def final_handler(ctx: AgentRunContext) -> AgentResponse:
streaming_flags.append(ctx.is_streaming)
- return AgentResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="response")])
+ return AgentResponse(messages=[ChatMessage("assistant", ["response"])])
await pipeline.execute(mock_agent, messages, context, final_handler)
@@ -1287,7 +1286,7 @@ async def process(
middleware = StreamProcessingMiddleware()
pipeline = AgentMiddlewarePipeline([middleware])
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
context = AgentRunContext(agent=mock_agent, messages=messages)
async def final_stream_handler(ctx: AgentRunContext) -> AsyncIterable[AgentResponseUpdate]:
@@ -1323,7 +1322,7 @@ async def process(self, context: ChatContext, next: Callable[[ChatContext], Awai
middleware = ChatStreamingFlagMiddleware()
pipeline = ChatMiddlewarePipeline([middleware])
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
chat_options: dict[str, Any] = {}
# Test non-streaming
@@ -1331,7 +1330,7 @@ async def process(self, context: ChatContext, next: Callable[[ChatContext], Awai
async def final_handler(ctx: ChatContext) -> ChatResponse:
streaming_flags.append(ctx.is_streaming)
- return ChatResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="response")])
+ return ChatResponse(messages=[ChatMessage("assistant", ["response"])])
await pipeline.execute(mock_chat_client, messages, chat_options, context, final_handler)
@@ -1365,7 +1364,7 @@ async def process(self, context: ChatContext, next: Callable[[ChatContext], Awai
middleware = ChatStreamProcessingMiddleware()
pipeline = ChatMiddlewarePipeline([middleware])
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
chat_options: dict[str, Any] = {}
context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options, is_streaming=True)
@@ -1447,7 +1446,7 @@ async def process(
middleware = NoNextMiddleware()
pipeline = AgentMiddlewarePipeline([middleware])
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
context = AgentRunContext(agent=mock_agent, messages=messages)
handler_called = False
@@ -1455,7 +1454,7 @@ async def process(
async def final_handler(ctx: AgentRunContext) -> AgentResponse:
nonlocal handler_called
handler_called = True
- return AgentResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="should not execute")])
+ return AgentResponse(messages=[ChatMessage("assistant", ["should not execute"])])
result = await pipeline.execute(mock_agent, messages, context, final_handler)
@@ -1478,7 +1477,7 @@ async def process(
middleware = NoNextStreamingMiddleware()
pipeline = AgentMiddlewarePipeline([middleware])
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
context = AgentRunContext(agent=mock_agent, messages=messages)
handler_called = False
@@ -1551,7 +1550,7 @@ async def process(
await next(context)
pipeline = AgentMiddlewarePipeline([FirstMiddleware(), SecondMiddleware()])
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
context = AgentRunContext(agent=mock_agent, messages=messages)
handler_called = False
@@ -1559,7 +1558,7 @@ async def process(
async def final_handler(ctx: AgentRunContext) -> AgentResponse:
nonlocal handler_called
handler_called = True
- return AgentResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="should not execute")])
+ return AgentResponse(messages=[ChatMessage("assistant", ["should not execute"])])
result = await pipeline.execute(mock_agent, messages, context, final_handler)
@@ -1580,7 +1579,7 @@ async def process(self, context: ChatContext, next: Callable[[ChatContext], Awai
middleware = NoNextChatMiddleware()
pipeline = ChatMiddlewarePipeline([middleware])
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
chat_options: dict[str, Any] = {}
context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options)
@@ -1589,7 +1588,7 @@ async def process(self, context: ChatContext, next: Callable[[ChatContext], Awai
async def final_handler(ctx: ChatContext) -> ChatResponse:
nonlocal handler_called
handler_called = True
- return ChatResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="should not execute")])
+ return ChatResponse(messages=[ChatMessage("assistant", ["should not execute"])])
result = await pipeline.execute(mock_chat_client, messages, chat_options, context, final_handler)
@@ -1608,7 +1607,7 @@ async def process(self, context: ChatContext, next: Callable[[ChatContext], Awai
middleware = NoNextStreamingChatMiddleware()
pipeline = ChatMiddlewarePipeline([middleware])
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
chat_options: dict[str, Any] = {}
context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options, is_streaming=True)
@@ -1644,7 +1643,7 @@ async def process(self, context: ChatContext, next: Callable[[ChatContext], Awai
await next(context)
pipeline = ChatMiddlewarePipeline([FirstChatMiddleware(), SecondChatMiddleware()])
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
chat_options: dict[str, Any] = {}
context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options)
@@ -1653,7 +1652,7 @@ async def process(self, context: ChatContext, next: Callable[[ChatContext], Awai
async def final_handler(ctx: ChatContext) -> ChatResponse:
nonlocal handler_called
handler_called = True
- return ChatResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="should not execute")])
+ return ChatResponse(messages=[ChatMessage("assistant", ["should not execute"])])
result = await pipeline.execute(mock_chat_client, messages, chat_options, context, final_handler)
diff --git a/python/packages/core/tests/core/test_middleware_context_result.py b/python/packages/core/tests/core/test_middleware_context_result.py
index 0f3b506fab..21f893a62c 100644
--- a/python/packages/core/tests/core/test_middleware_context_result.py
+++ b/python/packages/core/tests/core/test_middleware_context_result.py
@@ -14,7 +14,6 @@
ChatAgent,
ChatMessage,
Content,
- Role,
)
from agent_framework._middleware import (
AgentMiddleware,
@@ -40,7 +39,7 @@ class TestResultOverrideMiddleware:
async def test_agent_middleware_response_override_non_streaming(self, mock_agent: AgentProtocol) -> None:
"""Test that agent middleware can override response for non-streaming execution."""
- override_response = AgentResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="overridden response")])
+ override_response = AgentResponse(messages=[ChatMessage("assistant", ["overridden response"])])
class ResponseOverrideMiddleware(AgentMiddleware):
async def process(
@@ -52,7 +51,7 @@ async def process(
middleware = ResponseOverrideMiddleware()
pipeline = AgentMiddlewarePipeline([middleware])
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
context = AgentRunContext(agent=mock_agent, messages=messages)
handler_called = False
@@ -60,7 +59,7 @@ async def process(
async def final_handler(ctx: AgentRunContext) -> AgentResponse:
nonlocal handler_called
handler_called = True
- return AgentResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="original response")])
+ return AgentResponse(messages=[ChatMessage("assistant", ["original response"])])
result = await pipeline.execute(mock_agent, messages, context, final_handler)
@@ -88,7 +87,7 @@ async def process(
middleware = StreamResponseOverrideMiddleware()
pipeline = AgentMiddlewarePipeline([middleware])
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
context = AgentRunContext(agent=mock_agent, messages=messages)
async def final_handler(ctx: AgentRunContext) -> AsyncIterable[AgentResponseUpdate]:
@@ -149,7 +148,7 @@ async def process(
# Then conditionally override based on content
if any("special" in msg.text for msg in context.messages if msg.text):
context.result = AgentResponse(
- messages=[ChatMessage(role=Role.ASSISTANT, text="Special response from middleware!")]
+ messages=[ChatMessage("assistant", ["Special response from middleware!"])]
)
# Create ChatAgent with override middleware
@@ -157,14 +156,14 @@ async def process(
agent = ChatAgent(chat_client=mock_chat_client, middleware=[middleware])
# Test override case
- override_messages = [ChatMessage(role=Role.USER, text="Give me a special response")]
+ override_messages = [ChatMessage("user", ["Give me a special response"])]
override_response = await agent.run(override_messages)
assert override_response.messages[0].text == "Special response from middleware!"
# Verify chat client was called since middleware called next()
assert mock_chat_client.call_count == 1
# Test normal case
- normal_messages = [ChatMessage(role=Role.USER, text="Normal request")]
+ normal_messages = [ChatMessage("user", ["Normal request"])]
normal_response = await agent.run(normal_messages)
assert normal_response.messages[0].text == "test response"
# Verify chat client was called for normal case
@@ -194,7 +193,7 @@ async def process(
agent = ChatAgent(chat_client=mock_chat_client, middleware=[middleware])
# Test streaming override case
- override_messages = [ChatMessage(role=Role.USER, text="Give me a custom stream")]
+ override_messages = [ChatMessage("user", ["Give me a custom stream"])]
override_updates: list[AgentResponseUpdate] = []
async for update in agent.run_stream(override_messages):
override_updates.append(update)
@@ -205,7 +204,7 @@ async def process(
assert override_updates[2].text == " response!"
# Test normal streaming case
- normal_messages = [ChatMessage(role=Role.USER, text="Normal streaming request")]
+ normal_messages = [ChatMessage("user", ["Normal streaming request"])]
normal_updates: list[AgentResponseUpdate] = []
async for update in agent.run_stream(normal_messages):
normal_updates.append(update)
@@ -234,10 +233,10 @@ async def process(
async def final_handler(ctx: AgentRunContext) -> AgentResponse:
nonlocal handler_called
handler_called = True
- return AgentResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="executed response")])
+ return AgentResponse(messages=[ChatMessage("assistant", ["executed response"])])
# Test case where next() is NOT called
- no_execute_messages = [ChatMessage(role=Role.USER, text="Don't run this")]
+ no_execute_messages = [ChatMessage("user", ["Don't run this"])]
no_execute_context = AgentRunContext(agent=mock_agent, messages=no_execute_messages)
no_execute_result = await pipeline.execute(mock_agent, no_execute_messages, no_execute_context, final_handler)
@@ -252,7 +251,7 @@ async def final_handler(ctx: AgentRunContext) -> AgentResponse:
handler_called = False
# Test case where next() IS called
- execute_messages = [ChatMessage(role=Role.USER, text="Please execute this")]
+ execute_messages = [ChatMessage("user", ["Please execute this"])]
execute_context = AgentRunContext(agent=mock_agent, messages=execute_messages)
execute_result = await pipeline.execute(mock_agent, execute_messages, execute_context, final_handler)
@@ -332,11 +331,11 @@ async def process(
middleware = ObservabilityMiddleware()
pipeline = AgentMiddlewarePipeline([middleware])
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
context = AgentRunContext(agent=mock_agent, messages=messages)
async def final_handler(ctx: AgentRunContext) -> AgentResponse:
- return AgentResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="executed response")])
+ return AgentResponse(messages=[ChatMessage("assistant", ["executed response"])])
result = await pipeline.execute(mock_agent, messages, context, final_handler)
@@ -396,17 +395,15 @@ async def process(
if "modify" in context.result.messages[0].text:
# Override after observing
- context.result = AgentResponse(
- messages=[ChatMessage(role=Role.ASSISTANT, text="modified after execution")]
- )
+ context.result = AgentResponse(messages=[ChatMessage("assistant", ["modified after execution"])])
middleware = PostExecutionOverrideMiddleware()
pipeline = AgentMiddlewarePipeline([middleware])
- messages = [ChatMessage(role=Role.USER, text="test")]
+ messages = [ChatMessage("user", ["test"])]
context = AgentRunContext(agent=mock_agent, messages=messages)
async def final_handler(ctx: AgentRunContext) -> AgentResponse:
- return AgentResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="response to modify")])
+ return AgentResponse(messages=[ChatMessage("assistant", ["response to modify"])])
result = await pipeline.execute(mock_agent, messages, context, final_handler)
diff --git a/python/packages/core/tests/core/test_middleware_with_agent.py b/python/packages/core/tests/core/test_middleware_with_agent.py
index a9f410b609..51c227e0b2 100644
--- a/python/packages/core/tests/core/test_middleware_with_agent.py
+++ b/python/packages/core/tests/core/test_middleware_with_agent.py
@@ -15,7 +15,6 @@
ChatResponseUpdate,
Content,
FunctionTool,
- Role,
agent_middleware,
chat_middleware,
function_middleware,
@@ -58,13 +57,13 @@ async def process(
agent = ChatAgent(chat_client=chat_client, middleware=[middleware])
# Execute the agent
- messages = [ChatMessage(role=Role.USER, text="test message")]
+ messages = [ChatMessage("user", ["test message"])]
response = await agent.run(messages)
# Verify response
assert response is not None
assert len(response.messages) > 0
- assert response.messages[0].role == Role.ASSISTANT
+ assert response.messages[0].role == "assistant"
# Note: conftest "MockChatClient" returns different text format
assert "test response" in response.messages[0].text
@@ -93,7 +92,7 @@ async def process(
agent = ChatAgent(chat_client=chat_client, middleware=[middleware])
# Execute the agent
- messages = [ChatMessage(role=Role.USER, text="test message")]
+ messages = [ChatMessage("user", ["test message"])]
response = await agent.run(messages)
# Verify response
@@ -128,8 +127,8 @@ async def process(
# Execute the agent with multiple messages
messages = [
- ChatMessage(role=Role.USER, text="message1"),
- ChatMessage(role=Role.USER, text="message2"), # This should not be processed due to termination
+ ChatMessage("user", ["message1"]),
+ ChatMessage("user", ["message2"]), # This should not be processed due to termination
]
response = await agent.run(messages)
@@ -158,15 +157,15 @@ async def process(
# Execute the agent with multiple messages
messages = [
- ChatMessage(role=Role.USER, text="message1"),
- ChatMessage(role=Role.USER, text="message2"),
+ ChatMessage("user", ["message1"]),
+ ChatMessage("user", ["message2"]),
]
response = await agent.run(messages)
# Verify response
assert response is not None
assert len(response.messages) == 1
- assert response.messages[0].role == Role.ASSISTANT
+ assert response.messages[0].role == "assistant"
assert "test response" in response.messages[0].text
# Verify middleware execution order
@@ -190,7 +189,7 @@ async def process(
execution_order.append("middleware_after")
# Create a message to start the conversation
- messages = [ChatMessage(role=Role.USER, text="test message")]
+ messages = [ChatMessage("user", ["test message"])]
# Set up chat client to return a function call, then a final response
# If terminate works correctly, only the first response should be consumed
@@ -198,7 +197,7 @@ async def process(
ChatResponse(
messages=[
ChatMessage(
- role=Role.ASSISTANT,
+ role="assistant",
contents=[
Content.from_function_call(
call_id="test_call", name="test_function", arguments={"text": "test"}
@@ -207,7 +206,7 @@ async def process(
)
]
),
- ChatResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="this should not be consumed")]),
+ ChatResponse(messages=[ChatMessage("assistant", ["this should not be consumed"])]),
]
# Create the test function with the expected signature
@@ -251,7 +250,7 @@ async def process(
context.terminate = True
# Create a message to start the conversation
- messages = [ChatMessage(role=Role.USER, text="test message")]
+ messages = [ChatMessage("user", ["test message"])]
# Set up chat client to return a function call, then a final response
# If terminate works correctly, only the first response should be consumed
@@ -259,7 +258,7 @@ async def process(
ChatResponse(
messages=[
ChatMessage(
- role=Role.ASSISTANT,
+ role="assistant",
contents=[
Content.from_function_call(
call_id="test_call", name="test_function", arguments={"text": "test"}
@@ -268,7 +267,7 @@ async def process(
)
]
),
- ChatResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="this should not be consumed")]),
+ ChatResponse(messages=[ChatMessage("assistant", ["this should not be consumed"])]),
]
# Create the test function with the expected signature
@@ -312,13 +311,13 @@ async def tracking_agent_middleware(
agent = ChatAgent(chat_client=chat_client, middleware=[tracking_agent_middleware])
# Execute the agent
- messages = [ChatMessage(role=Role.USER, text="test message")]
+ messages = [ChatMessage("user", ["test message"])]
response = await agent.run(messages)
# Verify response
assert response is not None
assert len(response.messages) > 0
- assert response.messages[0].role == Role.ASSISTANT
+ assert response.messages[0].role == "assistant"
assert response.messages[0].text == "test response"
assert chat_client.call_count == 1
@@ -340,7 +339,7 @@ async def tracking_function_middleware(
agent = ChatAgent(chat_client=chat_client, middleware=[tracking_function_middleware])
# Execute the agent
- messages = [ChatMessage(role=Role.USER, text="test message")]
+ messages = [ChatMessage("user", ["test message"])]
response = await agent.run(messages)
# Verify response
@@ -376,13 +375,13 @@ async def process(
# Set up mock streaming responses
chat_client.streaming_responses = [
[
- ChatResponseUpdate(contents=[Content.from_text(text="Streaming")], role=Role.ASSISTANT),
- ChatResponseUpdate(contents=[Content.from_text(text=" response")], role=Role.ASSISTANT),
+ ChatResponseUpdate(contents=[Content.from_text(text="Streaming")], role="assistant"),
+ ChatResponseUpdate(contents=[Content.from_text(text=" response")], role="assistant"),
]
]
# Execute streaming
- messages = [ChatMessage(role=Role.USER, text="test message")]
+ messages = [ChatMessage("user", ["test message"])]
updates: list[AgentResponseUpdate] = []
async for update in agent.run_stream(messages):
updates.append(update)
@@ -411,7 +410,7 @@ async def process(
# Create ChatAgent with middleware
middleware = FlagTrackingMiddleware()
agent = ChatAgent(chat_client=chat_client, middleware=[middleware])
- messages = [ChatMessage(role=Role.USER, text="test message")]
+ messages = [ChatMessage("user", ["test message"])]
# Test non-streaming execution
response = await agent.run(messages)
@@ -452,7 +451,7 @@ async def process(
agent = ChatAgent(chat_client=chat_client, middleware=[middleware1, middleware2, middleware3])
# Execute the agent
- messages = [ChatMessage(role=Role.USER, text="test message")]
+ messages = [ChatMessage("user", ["test message"])]
response = await agent.run(messages)
# Verify response
@@ -511,7 +510,7 @@ async def function_function_middleware(
)
# Execute the agent
- messages = [ChatMessage(role=Role.USER, text="test message")]
+ messages = [ChatMessage("user", ["test message"])]
response = await agent.run(messages)
# Verify response
@@ -567,7 +566,7 @@ async def process(
function_call_response = ChatResponse(
messages=[
ChatMessage(
- role=Role.ASSISTANT,
+ role="assistant",
contents=[
Content.from_function_call(
call_id="call_123",
@@ -578,7 +577,7 @@ async def process(
)
]
)
- final_response = ChatResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="Final response")])
+ final_response = ChatResponse(messages=[ChatMessage("assistant", ["Final response"])])
chat_client.responses = [function_call_response, final_response]
@@ -591,7 +590,7 @@ async def process(
)
# Execute the agent
- messages = [ChatMessage(role=Role.USER, text="Get weather for Seattle")]
+ messages = [ChatMessage("user", ["Get weather for Seattle"])]
response = await agent.run(messages)
# Verify response
@@ -627,7 +626,7 @@ async def tracking_function_middleware(
function_call_response = ChatResponse(
messages=[
ChatMessage(
- role=Role.ASSISTANT,
+ role="assistant",
contents=[
Content.from_function_call(
call_id="call_456",
@@ -638,7 +637,7 @@ async def tracking_function_middleware(
)
]
)
- final_response = ChatResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="Final response")])
+ final_response = ChatResponse(messages=[ChatMessage("assistant", ["Final response"])])
chat_client.responses = [function_call_response, final_response]
@@ -650,7 +649,7 @@ async def tracking_function_middleware(
)
# Execute the agent
- messages = [ChatMessage(role=Role.USER, text="Get weather for San Francisco")]
+ messages = [ChatMessage("user", ["Get weather for San Francisco"])]
response = await agent.run(messages)
# Verify response
@@ -699,7 +698,7 @@ async def process(
function_call_response = ChatResponse(
messages=[
ChatMessage(
- role=Role.ASSISTANT,
+ role="assistant",
contents=[
Content.from_function_call(
call_id="call_789",
@@ -710,7 +709,7 @@ async def process(
)
]
)
- final_response = ChatResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="Final response")])
+ final_response = ChatResponse(messages=[ChatMessage("assistant", ["Final response"])])
chat_client.responses = [function_call_response, final_response]
@@ -722,7 +721,7 @@ async def process(
)
# Execute the agent
- messages = [ChatMessage(role=Role.USER, text="Get weather for New York")]
+ messages = [ChatMessage("user", ["Get weather for New York"])]
response = await agent.run(messages)
# Verify response
@@ -786,7 +785,7 @@ async def kwargs_middleware(
ChatResponse(
messages=[
ChatMessage(
- role=Role.ASSISTANT,
+ role="assistant",
contents=[
Content.from_function_call(
call_id="test_call", name="sample_tool_function", arguments={"location": "Seattle"}
@@ -795,16 +794,14 @@ async def kwargs_middleware(
)
]
),
- ChatResponse(
- messages=[ChatMessage(role=Role.ASSISTANT, contents=[Content.from_text("Function completed")])]
- ),
+ ChatResponse(messages=[ChatMessage("assistant", [Content.from_text("Function completed")])]),
]
# Create ChatAgent with function middleware
agent = ChatAgent(chat_client=chat_client, middleware=[kwargs_middleware], tools=[sample_tool_function])
# Execute the agent with custom parameters passed as kwargs
- messages = [ChatMessage(role=Role.USER, text="test message")]
+ messages = [ChatMessage("user", ["test message"])]
response = await agent.run(messages, custom_param="test_value")
# Verify response
@@ -1068,7 +1065,7 @@ async def test_run_level_middleware_non_streaming(self, chat_client: "MockChatCl
# Verify response is correct
assert response is not None
assert len(response.messages) > 0
- assert response.messages[0].role == Role.ASSISTANT
+ assert response.messages[0].role == "assistant"
assert "test response" in response.messages[0].text
# Verify middleware was executed
@@ -1097,8 +1094,8 @@ async def process(
# Set up mock streaming responses
chat_client.streaming_responses = [
[
- ChatResponseUpdate(contents=[Content.from_text(text="Stream")], role=Role.ASSISTANT),
- ChatResponseUpdate(contents=[Content.from_text(text=" response")], role=Role.ASSISTANT),
+ ChatResponseUpdate(contents=[Content.from_text(text="Stream")], role="assistant"),
+ ChatResponseUpdate(contents=[Content.from_text(text=" response")], role="assistant"),
]
]
@@ -1182,7 +1179,7 @@ def custom_tool(message: str) -> str:
function_call_response = ChatResponse(
messages=[
ChatMessage(
- role=Role.ASSISTANT,
+ role="assistant",
contents=[
Content.from_function_call(
call_id="test_call",
@@ -1193,7 +1190,7 @@ def custom_tool(message: str) -> str:
)
]
)
- final_response = ChatResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="Final response")])
+ final_response = ChatResponse(messages=[ChatMessage("assistant", ["Final response"])])
chat_client.responses = [function_call_response, final_response]
# Create agent with agent-level middleware
@@ -1275,7 +1272,7 @@ def custom_tool(message: str) -> str:
function_call_response = ChatResponse(
messages=[
ChatMessage(
- role=Role.ASSISTANT,
+ role="assistant",
contents=[
Content.from_function_call(
call_id="test_call",
@@ -1286,7 +1283,7 @@ def custom_tool(message: str) -> str:
)
]
)
- final_response = ChatResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="Final response")])
+ final_response = ChatResponse(messages=[ChatMessage("assistant", ["Final response"])])
chat_client.responses = [function_call_response, final_response]
# Should work without errors
@@ -1296,7 +1293,7 @@ def custom_tool(message: str) -> str:
tools=[custom_tool_wrapped],
)
- response = await agent.run([ChatMessage(role=Role.USER, text="test")])
+ response = await agent.run([ChatMessage("user", ["test"])])
assert response is not None
assert "decorator_type_match_agent" in execution_order
@@ -1317,7 +1314,7 @@ async def mismatched_middleware(
await next(context)
agent = ChatAgent(chat_client=chat_client, middleware=[mismatched_middleware])
- await agent.run([ChatMessage(role=Role.USER, text="test")])
+ await agent.run([ChatMessage("user", ["test"])])
async def test_only_decorator_specified(self, chat_client: Any) -> None:
"""Only decorator specified - rely on decorator."""
@@ -1346,7 +1343,7 @@ def custom_tool(message: str) -> str:
function_call_response = ChatResponse(
messages=[
ChatMessage(
- role=Role.ASSISTANT,
+ role="assistant",
contents=[
Content.from_function_call(
call_id="test_call",
@@ -1357,7 +1354,7 @@ def custom_tool(message: str) -> str:
)
]
)
- final_response = ChatResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="Final response")])
+ final_response = ChatResponse(messages=[ChatMessage("assistant", ["Final response"])])
chat_client.responses = [function_call_response, final_response]
# Should work - relies on decorator
@@ -1367,7 +1364,7 @@ def custom_tool(message: str) -> str:
tools=[custom_tool_wrapped],
)
- response = await agent.run([ChatMessage(role=Role.USER, text="test")])
+ response = await agent.run([ChatMessage("user", ["test"])])
assert response is not None
assert "decorator_only_agent" in execution_order
@@ -1402,7 +1399,7 @@ def custom_tool(message: str) -> str:
function_call_response = ChatResponse(
messages=[
ChatMessage(
- role=Role.ASSISTANT,
+ role="assistant",
contents=[
Content.from_function_call(
call_id="test_call",
@@ -1413,7 +1410,7 @@ def custom_tool(message: str) -> str:
)
]
)
- final_response = ChatResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="Final response")])
+ final_response = ChatResponse(messages=[ChatMessage("assistant", ["Final response"])])
chat_client.responses = [function_call_response, final_response]
# Should work - relies on type annotations
@@ -1421,7 +1418,7 @@ def custom_tool(message: str) -> str:
chat_client=chat_client, middleware=[type_only_agent, type_only_function], tools=[custom_tool_wrapped]
)
- response = await agent.run([ChatMessage(role=Role.USER, text="test")])
+ response = await agent.run([ChatMessage("user", ["test"])])
assert response is not None
assert "type_only_agent" in execution_order
@@ -1436,7 +1433,7 @@ async def no_info_middleware(context: Any, next: Any) -> None: # No decorator,
# Should raise MiddlewareException
with pytest.raises(MiddlewareException, match="Cannot determine middleware type"):
agent = ChatAgent(chat_client=chat_client, middleware=[no_info_middleware])
- await agent.run([ChatMessage(role=Role.USER, text="test")])
+ await agent.run([ChatMessage("user", ["test"])])
async def test_insufficient_parameters_error(self, chat_client: Any) -> None:
"""Test that middleware with insufficient parameters raises an error."""
@@ -1450,7 +1447,7 @@ async def insufficient_params_middleware(context: Any) -> None: # Missing 'next
pass
agent = ChatAgent(chat_client=chat_client, middleware=[insufficient_params_middleware])
- await agent.run([ChatMessage(role=Role.USER, text="test")])
+ await agent.run([ChatMessage("user", ["test"])])
async def test_decorator_markers_preserved(self) -> None:
"""Test that decorator markers are properly set on functions."""
@@ -1523,7 +1520,7 @@ async def process(
thread = agent.get_new_thread()
# First run
- first_messages = [ChatMessage(role=Role.USER, text="first message")]
+ first_messages = [ChatMessage("user", ["first message"])]
first_response = await agent.run(first_messages, thread=thread)
# Verify first response
@@ -1531,7 +1528,7 @@ async def process(
assert len(first_response.messages) > 0
# Second run - use the same thread
- second_messages = [ChatMessage(role=Role.USER, text="second message")]
+ second_messages = [ChatMessage("user", ["second message"])]
second_response = await agent.run(second_messages, thread=thread)
# Verify second response
@@ -1603,13 +1600,13 @@ async def process(self, context: ChatContext, next: Callable[[ChatContext], Awai
agent = ChatAgent(chat_client=chat_client, middleware=[middleware])
# Execute the agent
- messages = [ChatMessage(role=Role.USER, text="test message")]
+ messages = [ChatMessage("user", ["test message"])]
response = await agent.run(messages)
# Verify response
assert response is not None
assert len(response.messages) > 0
- assert response.messages[0].role == Role.ASSISTANT
+ assert response.messages[0].role == "assistant"
assert "test response" in response.messages[0].text
assert execution_order == ["chat_middleware_before", "chat_middleware_after"]
@@ -1629,13 +1626,13 @@ async def tracking_chat_middleware(
agent = ChatAgent(chat_client=chat_client, middleware=[tracking_chat_middleware])
# Execute the agent
- messages = [ChatMessage(role=Role.USER, text="test message")]
+ messages = [ChatMessage("user", ["test message"])]
response = await agent.run(messages)
# Verify response
assert response is not None
assert len(response.messages) > 0
- assert response.messages[0].role == Role.ASSISTANT
+ assert response.messages[0].role == "assistant"
assert "test response" in response.messages[0].text
assert execution_order == ["chat_middleware_before", "chat_middleware_after"]
@@ -1649,10 +1646,10 @@ async def message_modifier_middleware(
# Modify the first message by adding a prefix
if context.messages:
for idx, msg in enumerate(context.messages):
- if msg.role.value == "system":
+ if msg.role == "system":
continue
original_text = msg.text or ""
- context.messages[idx] = ChatMessage(role=msg.role, text=f"MODIFIED: {original_text}")
+ context.messages[idx] = ChatMessage(msg.role, [f"MODIFIED: {original_text}"])
break
await next(context)
@@ -1661,7 +1658,7 @@ async def message_modifier_middleware(
agent = ChatAgent(chat_client=chat_client, middleware=[message_modifier_middleware])
# Execute the agent
- messages = [ChatMessage(role=Role.USER, text="test message")]
+ messages = [ChatMessage("user", ["test message"])]
response = await agent.run(messages)
# Verify that the message was modified (MockBaseChatClient echoes back the input)
@@ -1677,7 +1674,7 @@ async def response_override_middleware(
) -> None:
# Override the response without calling next()
context.result = ChatResponse(
- messages=[ChatMessage(role=Role.ASSISTANT, text="Middleware overridden response")],
+ messages=[ChatMessage("assistant", ["Middleware overridden response"])],
response_id="middleware-response-123",
)
context.terminate = True
@@ -1687,7 +1684,7 @@ async def response_override_middleware(
agent = ChatAgent(chat_client=chat_client, middleware=[response_override_middleware])
# Execute the agent
- messages = [ChatMessage(role=Role.USER, text="test message")]
+ messages = [ChatMessage("user", ["test message"])]
response = await agent.run(messages)
# Verify that the response was overridden
@@ -1717,7 +1714,7 @@ async def second_middleware(context: ChatContext, next: Callable[[ChatContext],
agent = ChatAgent(chat_client=chat_client, middleware=[first_middleware, second_middleware])
# Execute the agent
- messages = [ChatMessage(role=Role.USER, text="test message")]
+ messages = [ChatMessage("user", ["test message"])]
response = await agent.run(messages)
# Verify response
@@ -1743,13 +1740,13 @@ async def process(self, context: ChatContext, next: Callable[[ChatContext], Awai
# Set up mock streaming responses
chat_client.streaming_responses = [
[
- ChatResponseUpdate(contents=[Content.from_text(text="Stream")], role=Role.ASSISTANT),
- ChatResponseUpdate(contents=[Content.from_text(text=" response")], role=Role.ASSISTANT),
+ ChatResponseUpdate(contents=[Content.from_text(text="Stream")], role="assistant"),
+ ChatResponseUpdate(contents=[Content.from_text(text=" response")], role="assistant"),
]
]
# Execute streaming
- messages = [ChatMessage(role=Role.USER, text="test message")]
+ messages = [ChatMessage("user", ["test message"])]
updates: list[AgentResponseUpdate] = []
async for update in agent.run_stream(messages):
updates.append(update)
@@ -1770,9 +1767,7 @@ async def process(self, context: ChatContext, next: Callable[[ChatContext], Awai
execution_order.append("middleware_before")
context.terminate = True
# Set a custom response since we're terminating
- context.result = ChatResponse(
- messages=[ChatMessage(role=Role.ASSISTANT, text="Terminated by middleware")]
- )
+ context.result = ChatResponse(messages=[ChatMessage("assistant", ["Terminated by middleware"])])
# We call next() but since terminate=True, execution should stop
await next(context)
execution_order.append("middleware_after")
@@ -1782,7 +1777,7 @@ async def process(self, context: ChatContext, next: Callable[[ChatContext], Awai
agent = ChatAgent(chat_client=chat_client, middleware=[PreTerminationChatMiddleware()])
# Execute the agent
- messages = [ChatMessage(role=Role.USER, text="test message")]
+ messages = [ChatMessage("user", ["test message"])]
response = await agent.run(messages)
# Verify response was from middleware
@@ -1807,7 +1802,7 @@ async def process(self, context: ChatContext, next: Callable[[ChatContext], Awai
agent = ChatAgent(chat_client=chat_client, middleware=[PostTerminationChatMiddleware()])
# Execute the agent
- messages = [ChatMessage(role=Role.USER, text="test message")]
+ messages = [ChatMessage("user", ["test message"])]
response = await agent.run(messages)
# Verify response is from actual execution
@@ -1843,7 +1838,7 @@ async def function_middleware(
function_call_response = ChatResponse(
messages=[
ChatMessage(
- role=Role.ASSISTANT,
+ role="assistant",
contents=[
Content.from_function_call(
call_id="call_456",
@@ -1854,7 +1849,7 @@ async def function_middleware(
)
]
)
- final_response = ChatResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="Final response")])
+ final_response = ChatResponse(messages=[ChatMessage("assistant", ["Final response"])])
chat_client = use_function_invocation(MockBaseChatClient)()
chat_client.run_responses = [function_call_response, final_response]
@@ -1867,7 +1862,7 @@ async def function_middleware(
)
# Execute the agent
- messages = [ChatMessage(role=Role.USER, text="Get weather for San Francisco")]
+ messages = [ChatMessage("user", ["Get weather for San Francisco"])]
response = await agent.run(messages)
# Verify response
@@ -1924,7 +1919,7 @@ async def kwargs_middleware(
agent = ChatAgent(chat_client=chat_client, middleware=[kwargs_middleware])
# Execute the agent with custom parameters
- messages = [ChatMessage(role=Role.USER, text="test message")]
+ messages = [ChatMessage("user", ["test message"])]
response = await agent.run(messages, temperature=0.7, max_tokens=100, custom_param="test_value")
# Verify response
@@ -1973,7 +1968,7 @@ def __init__(self):
self.middleware = [TrackingMiddleware()]
async def run(self, messages=None, *, thread=None, **kwargs) -> AgentResponse:
- return AgentResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="response")])
+ return AgentResponse(messages=[ChatMessage("assistant", ["response"])])
def run_stream(self, messages=None, *, thread=None, **kwargs) -> AsyncIterable[AgentResponseUpdate]:
async def _stream():
diff --git a/python/packages/core/tests/core/test_middleware_with_chat.py b/python/packages/core/tests/core/test_middleware_with_chat.py
index ef2f6f3c09..a3893e1a6e 100644
--- a/python/packages/core/tests/core/test_middleware_with_chat.py
+++ b/python/packages/core/tests/core/test_middleware_with_chat.py
@@ -12,7 +12,6 @@
Content,
FunctionInvocationContext,
FunctionTool,
- Role,
chat_middleware,
function_middleware,
use_chat_middleware,
@@ -43,13 +42,13 @@ async def process(
chat_client_base.middleware = [LoggingChatMiddleware()]
# Execute chat client directly
- messages = [ChatMessage(role=Role.USER, text="test message")]
+ messages = [ChatMessage("user", ["test message"])]
response = await chat_client_base.get_response(messages)
# Verify response
assert response is not None
assert len(response.messages) > 0
- assert response.messages[0].role == Role.ASSISTANT
+ assert response.messages[0].role == "assistant"
# Verify middleware execution order
assert execution_order == ["chat_middleware_before", "chat_middleware_after"]
@@ -68,13 +67,13 @@ async def logging_chat_middleware(context: ChatContext, next: Callable[[ChatCont
chat_client_base.middleware = [logging_chat_middleware]
# Execute chat client directly
- messages = [ChatMessage(role=Role.USER, text="test message")]
+ messages = [ChatMessage("user", ["test message"])]
response = await chat_client_base.get_response(messages)
# Verify response
assert response is not None
assert len(response.messages) > 0
- assert response.messages[0].role == Role.ASSISTANT
+ assert response.messages[0].role == "assistant"
# Verify middleware execution order
assert execution_order == ["function_middleware_before", "function_middleware_after"]
@@ -89,14 +88,14 @@ async def message_modifier_middleware(
# Modify the first message by adding a prefix
if context.messages and len(context.messages) > 0:
original_text = context.messages[0].text or ""
- context.messages[0] = ChatMessage(role=context.messages[0].role, text=f"MODIFIED: {original_text}")
+ context.messages[0] = ChatMessage(context.messages[0].role, [f"MODIFIED: {original_text}"])
await next(context)
# Add middleware to chat client
chat_client_base.middleware = [message_modifier_middleware]
# Execute chat client
- messages = [ChatMessage(role=Role.USER, text="test message")]
+ messages = [ChatMessage("user", ["test message"])]
response = await chat_client_base.get_response(messages)
# Verify that the message was modified (MockChatClient echoes back the input)
@@ -114,7 +113,7 @@ async def response_override_middleware(
) -> None:
# Override the response without calling next()
context.result = ChatResponse(
- messages=[ChatMessage(role=Role.ASSISTANT, text="Middleware overridden response")],
+ messages=[ChatMessage("assistant", ["Middleware overridden response"])],
response_id="middleware-response-123",
)
context.terminate = True
@@ -123,7 +122,7 @@ async def response_override_middleware(
chat_client_base.middleware = [response_override_middleware]
# Execute chat client
- messages = [ChatMessage(role=Role.USER, text="test message")]
+ messages = [ChatMessage("user", ["test message"])]
response = await chat_client_base.get_response(messages)
# Verify that the response was overridden
@@ -152,7 +151,7 @@ async def second_middleware(context: ChatContext, next: Callable[[ChatContext],
chat_client_base.middleware = [first_middleware, second_middleware]
# Execute chat client
- messages = [ChatMessage(role=Role.USER, text="test message")]
+ messages = [ChatMessage("user", ["test message"])]
response = await chat_client_base.get_response(messages)
# Verify response
@@ -180,13 +179,13 @@ async def agent_level_chat_middleware(
agent = ChatAgent(chat_client=chat_client, middleware=[agent_level_chat_middleware])
# Execute the agent
- messages = [ChatMessage(role=Role.USER, text="test message")]
+ messages = [ChatMessage("user", ["test message"])]
response = await agent.run(messages)
# Verify response
assert response is not None
assert len(response.messages) > 0
- assert response.messages[0].role == Role.ASSISTANT
+ assert response.messages[0].role == "assistant"
# Verify middleware execution order
assert execution_order == ["agent_chat_middleware_before", "agent_chat_middleware_after"]
@@ -211,7 +210,7 @@ async def second_middleware(context: ChatContext, next: Callable[[ChatContext],
agent = ChatAgent(chat_client=chat_client_base, middleware=[first_middleware, second_middleware])
# Execute the agent
- messages = [ChatMessage(role=Role.USER, text="test message")]
+ messages = [ChatMessage("user", ["test message"])]
response = await agent.run(messages)
# Verify response
@@ -237,7 +236,7 @@ async def streaming_middleware(context: ChatContext, next: Callable[[ChatContext
chat_client_base.middleware = [streaming_middleware]
# Execute streaming response
- messages = [ChatMessage(role=Role.USER, text="test message")]
+ messages = [ChatMessage("user", ["test message"])]
updates: list[object] = []
async for update in chat_client_base.get_streaming_response(messages):
updates.append(update)
@@ -258,19 +257,19 @@ async def counting_middleware(context: ChatContext, next: Callable[[ChatContext]
await next(context)
# First call with run-level middleware
- messages = [ChatMessage(role=Role.USER, text="first message")]
+ messages = [ChatMessage("user", ["first message"])]
response1 = await chat_client_base.get_response(messages, middleware=[counting_middleware])
assert response1 is not None
assert execution_count["count"] == 1
# Second call WITHOUT run-level middleware - should not execute the middleware
- messages = [ChatMessage(role=Role.USER, text="second message")]
+ messages = [ChatMessage("user", ["second message"])]
response2 = await chat_client_base.get_response(messages)
assert response2 is not None
assert execution_count["count"] == 1 # Should still be 1, not 2
# Third call with run-level middleware again - should execute
- messages = [ChatMessage(role=Role.USER, text="third message")]
+ messages = [ChatMessage("user", ["third message"])]
response3 = await chat_client_base.get_response(messages, middleware=[counting_middleware])
assert response3 is not None
assert execution_count["count"] == 2 # Should be 2 now
@@ -301,7 +300,7 @@ async def kwargs_middleware(context: ChatContext, next: Callable[[ChatContext],
chat_client_base.middleware = [kwargs_middleware]
# Execute chat client with custom parameters
- messages = [ChatMessage(role=Role.USER, text="test message")]
+ messages = [ChatMessage("user", ["test message"])]
response = await chat_client_base.get_response(
messages, temperature=0.7, max_tokens=100, custom_param="test_value"
)
@@ -355,7 +354,7 @@ def sample_tool(location: str) -> str:
function_call_response = ChatResponse(
messages=[
ChatMessage(
- role=Role.ASSISTANT,
+ role="assistant",
contents=[
Content.from_function_call(
call_id="call_1",
@@ -366,14 +365,12 @@ def sample_tool(location: str) -> str:
)
]
)
- final_response = ChatResponse(
- messages=[ChatMessage(role=Role.ASSISTANT, text="Based on the weather data, it's sunny!")]
- )
+ final_response = ChatResponse(messages=[ChatMessage("assistant", ["Based on the weather data, it's sunny!"])])
chat_client.run_responses = [function_call_response, final_response]
# Execute the chat client directly with tools - this should trigger function invocation and middleware
- messages = [ChatMessage(role=Role.USER, text="What's the weather in San Francisco?")]
+ messages = [ChatMessage("user", ["What's the weather in San Francisco?"])]
response = await chat_client.get_response(messages, options={"tools": [sample_tool_wrapped]})
# Verify response
@@ -418,7 +415,7 @@ def sample_tool(location: str) -> str:
function_call_response = ChatResponse(
messages=[
ChatMessage(
- role=Role.ASSISTANT,
+ role="assistant",
contents=[
Content.from_function_call(
call_id="call_2",
@@ -430,13 +427,13 @@ def sample_tool(location: str) -> str:
]
)
final_response = ChatResponse(
- messages=[ChatMessage(role=Role.ASSISTANT, text="The weather information has been retrieved!")]
+ messages=[ChatMessage("assistant", ["The weather information has been retrieved!"])]
)
chat_client.run_responses = [function_call_response, final_response]
# Execute the chat client directly with run-level middleware and tools
- messages = [ChatMessage(role=Role.USER, text="What's the weather in New York?")]
+ messages = [ChatMessage("user", ["What's the weather in New York?"])]
response = await chat_client.get_response(
messages, options={"tools": [sample_tool_wrapped]}, middleware=[run_level_function_middleware]
)
diff --git a/python/packages/core/tests/core/test_observability.py b/python/packages/core/tests/core/test_observability.py
index 3818a057bb..726f19c1af 100644
--- a/python/packages/core/tests/core/test_observability.py
+++ b/python/packages/core/tests/core/test_observability.py
@@ -14,12 +14,13 @@
AGENT_FRAMEWORK_USER_AGENT,
AgentProtocol,
AgentResponse,
+ AgentResponseUpdate,
AgentThread,
BaseChatClient,
ChatMessage,
ChatResponse,
ChatResponseUpdate,
- Role,
+ Content,
UsageDetails,
prepend_agent_framework_to_user_agent,
tool,
@@ -217,7 +218,7 @@ async def _inner_get_response(
self, *, messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any
):
return ChatResponse(
- messages=[ChatMessage(role=Role.ASSISTANT, text="Test response")],
+ messages=[ChatMessage("assistant", ["Test response"])],
usage_details=UsageDetails(input_token_count=10, output_token_count=20),
finish_reason=None,
)
@@ -225,8 +226,8 @@ async def _inner_get_response(
async def _inner_get_streaming_response(
self, *, messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any
):
- yield ChatResponseUpdate(text="Hello", role=Role.ASSISTANT)
- yield ChatResponseUpdate(text=" world", role=Role.ASSISTANT)
+ yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")], role="assistant")
+ yield ChatResponseUpdate(contents=[Content.from_text(text=" world")], role="assistant")
return MockChatClient
@@ -236,7 +237,7 @@ async def test_chat_client_observability(mock_chat_client, span_exporter: InMemo
"""Test that when diagnostics are enabled, telemetry is applied."""
client = use_instrumentation(mock_chat_client)()
- messages = [ChatMessage(role=Role.USER, text="Test message")]
+ messages = [ChatMessage("user", ["Test message"])]
span_exporter.clear()
response = await client.get_response(messages=messages, model_id="Test")
assert response is not None
@@ -259,7 +260,7 @@ async def test_chat_client_streaming_observability(
):
"""Test streaming telemetry through the use_instrumentation decorator."""
client = use_instrumentation(mock_chat_client)()
- messages = [ChatMessage(role=Role.USER, text="Test")]
+ messages = [ChatMessage("user", ["Test"])]
span_exporter.clear()
# Collect all yielded updates
updates = []
@@ -288,7 +289,7 @@ async def test_chat_client_observability_with_instructions(
client = use_instrumentation(mock_chat_client)()
- messages = [ChatMessage(role=Role.USER, text="Test message")]
+ messages = [ChatMessage("user", ["Test message"])]
options = {"model_id": "Test", "instructions": "You are a helpful assistant."}
span_exporter.clear()
response = await client.get_response(messages=messages, options=options)
@@ -317,7 +318,7 @@ async def test_chat_client_streaming_observability_with_instructions(
import json
client = use_instrumentation(mock_chat_client)()
- messages = [ChatMessage(role=Role.USER, text="Test")]
+ messages = [ChatMessage("user", ["Test"])]
options = {"model_id": "Test", "instructions": "You are a helpful assistant."}
span_exporter.clear()
@@ -344,7 +345,7 @@ async def test_chat_client_observability_without_instructions(
"""Test that system_instructions attribute is not set when instructions are not provided."""
client = use_instrumentation(mock_chat_client)()
- messages = [ChatMessage(role=Role.USER, text="Test message")]
+ messages = [ChatMessage("user", ["Test message"])]
options = {"model_id": "Test"} # No instructions
span_exporter.clear()
response = await client.get_response(messages=messages, options=options)
@@ -365,7 +366,7 @@ async def test_chat_client_observability_with_empty_instructions(
"""Test that system_instructions attribute is not set when instructions is an empty string."""
client = use_instrumentation(mock_chat_client)()
- messages = [ChatMessage(role=Role.USER, text="Test message")]
+ messages = [ChatMessage("user", ["Test message"])]
options = {"model_id": "Test", "instructions": ""} # Empty string
span_exporter.clear()
response = await client.get_response(messages=messages, options=options)
@@ -388,7 +389,7 @@ async def test_chat_client_observability_with_list_instructions(
client = use_instrumentation(mock_chat_client)()
- messages = [ChatMessage(role=Role.USER, text="Test message")]
+ messages = [ChatMessage("user", ["Test message"])]
options = {"model_id": "Test", "instructions": ["Instruction 1", "Instruction 2"]}
span_exporter.clear()
response = await client.get_response(messages=messages, options=options)
@@ -409,7 +410,7 @@ async def test_chat_client_observability_with_list_instructions(
async def test_chat_client_without_model_id_observability(mock_chat_client, span_exporter: InMemorySpanExporter):
"""Test telemetry shouldn't fail when the model_id is not provided for unknown reason."""
client = use_instrumentation(mock_chat_client)()
- messages = [ChatMessage(role=Role.USER, text="Test")]
+ messages = [ChatMessage("user", ["Test"])]
span_exporter.clear()
response = await client.get_response(messages=messages)
@@ -428,7 +429,7 @@ async def test_chat_client_streaming_without_model_id_observability(
):
"""Test streaming telemetry shouldn't fail when the model_id is not provided for unknown reason."""
client = use_instrumentation(mock_chat_client)()
- messages = [ChatMessage(role=Role.USER, text="Test")]
+ messages = [ChatMessage("user", ["Test"])]
span_exporter.clear()
# Collect all yielded updates
updates = []
@@ -535,17 +536,16 @@ def __init__(self):
async def run(self, messages=None, *, thread=None, **kwargs):
return AgentResponse(
- messages=[ChatMessage(role=Role.ASSISTANT, text="Agent response")],
+ messages=[ChatMessage("assistant", ["Agent response"])],
usage_details=UsageDetails(input_token_count=15, output_token_count=25),
response_id="test_response_id",
raw_representation=Mock(finish_reason=Mock(value="stop")),
)
async def run_stream(self, messages=None, *, thread=None, **kwargs):
- from agent_framework import AgentResponseUpdate
- yield AgentResponseUpdate(text="Hello", role=Role.ASSISTANT)
- yield AgentResponseUpdate(text=" from agent", role=Role.ASSISTANT)
+ yield AgentResponseUpdate(contents=[Content.from_text(text="Hello")], role="assistant")
+ yield AgentResponseUpdate(contents=[Content.from_text(text=" from agent")], role="assistant")
return MockChatClientAgent
@@ -1338,7 +1338,7 @@ async def _inner_get_response(self, *, messages, options, **kwargs):
raise ValueError("Test error")
client = use_instrumentation(FailingChatClient)()
- messages = [ChatMessage(role=Role.USER, text="Test")]
+ messages = [ChatMessage("user", ["Test"])]
span_exporter.clear()
with pytest.raises(ValueError, match="Test error"):
@@ -1356,11 +1356,11 @@ async def test_chat_client_streaming_observability_exception(mock_chat_client, s
class FailingStreamingChatClient(mock_chat_client):
async def _inner_get_streaming_response(self, *, messages, options, **kwargs):
- yield ChatResponseUpdate(text="Hello", role=Role.ASSISTANT)
+ yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")], role="assistant")
raise ValueError("Streaming error")
client = use_instrumentation(FailingStreamingChatClient)()
- messages = [ChatMessage(role=Role.USER, text="Test")]
+ messages = [ChatMessage("user", ["Test"])]
span_exporter.clear()
with pytest.raises(ValueError, match="Streaming error"):
@@ -1431,12 +1431,11 @@ def test_get_response_attributes_with_finish_reason():
"""Test _get_response_attributes includes finish_reason."""
from unittest.mock import Mock
- from agent_framework import FinishReason
from agent_framework.observability import OtelAttr, _get_response_attributes
response = Mock()
response.response_id = None
- response.finish_reason = FinishReason.STOP
+ response.finish_reason = "stop"
response.raw_representation = None
response.usage_details = None
@@ -1608,11 +1607,10 @@ def test_get_response_attributes_finish_reason_from_raw():
"""Test _get_response_attributes gets finish_reason from raw_representation."""
from unittest.mock import Mock
- from agent_framework import FinishReason
from agent_framework.observability import OtelAttr, _get_response_attributes
raw_rep = Mock()
- raw_rep.finish_reason = FinishReason.LENGTH
+ raw_rep.finish_reason = "length"
response = Mock()
response.response_id = None
@@ -1668,8 +1666,7 @@ async def run(
**kwargs,
):
return AgentResponse(
- messages=[ChatMessage(role=Role.ASSISTANT, text="Test response")],
- thread=thread,
+ messages=[ChatMessage("assistant", ["Test response"])],
)
async def run_stream(
@@ -1679,9 +1676,8 @@ async def run_stream(
thread=None,
**kwargs,
):
- from agent_framework import AgentResponseUpdate
- yield AgentResponseUpdate(text="Test", role=Role.ASSISTANT)
+ yield AgentResponseUpdate(contents=[Content.from_text(text="Test")], role="assistant")
decorated_agent = use_agent_instrumentation(MockAgent)
agent = decorated_agent()
@@ -1697,7 +1693,6 @@ async def run_stream(
@pytest.mark.parametrize("enable_sensitive_data", [True], indirect=True)
async def test_agent_observability_with_exception(span_exporter: InMemorySpanExporter, enable_sensitive_data):
"""Test agent instrumentation captures exceptions."""
- from agent_framework import AgentResponseUpdate
from agent_framework.observability import use_agent_instrumentation
class FailingAgent(AgentProtocol):
@@ -1730,7 +1725,7 @@ async def run(self, messages=None, *, thread=None, **kwargs):
async def run_stream(self, messages=None, *, thread=None, **kwargs):
# yield before raise to make this an async generator
- yield AgentResponseUpdate(text="", role=Role.ASSISTANT)
+ yield AgentResponseUpdate(contents=[Content.from_text(text="")], role="assistant")
raise RuntimeError("Agent failed")
decorated_agent = use_agent_instrumentation(FailingAgent)
@@ -1751,7 +1746,6 @@ async def run_stream(self, messages=None, *, thread=None, **kwargs):
@pytest.mark.parametrize("enable_sensitive_data", [True, False], indirect=True)
async def test_agent_streaming_observability(span_exporter: InMemorySpanExporter, enable_sensitive_data):
"""Test agent streaming instrumentation."""
- from agent_framework import AgentResponseUpdate
from agent_framework.observability import use_agent_instrumentation
class StreamingAgent(AgentProtocol):
@@ -1781,13 +1775,12 @@ def default_options(self):
async def run(self, messages=None, *, thread=None, **kwargs):
return AgentResponse(
- messages=[ChatMessage(role=Role.ASSISTANT, text="Test")],
- thread=thread,
+ messages=[ChatMessage("assistant", ["Test"])],
)
async def run_stream(self, messages=None, *, thread=None, **kwargs):
- yield AgentResponseUpdate(text="Hello ", role=Role.ASSISTANT)
- yield AgentResponseUpdate(text="World", role=Role.ASSISTANT)
+ yield AgentResponseUpdate(contents=[Content.from_text(text="Hello ")], role="assistant")
+ yield AgentResponseUpdate(contents=[Content.from_text(text="World")], role="assistant")
decorated_agent = use_agent_instrumentation(StreamingAgent)
agent = decorated_agent()
@@ -1836,24 +1829,22 @@ async def test_capture_messages_with_finish_reason(mock_chat_client, span_export
"""Test that finish_reason is captured in output messages."""
import json
- from agent_framework import FinishReason
-
class ClientWithFinishReason(mock_chat_client):
async def _inner_get_response(self, *, messages, options, **kwargs):
return ChatResponse(
- messages=[ChatMessage(role=Role.ASSISTANT, text="Done")],
+ messages=[ChatMessage("assistant", ["Done"])],
usage_details=UsageDetails(input_token_count=5, output_token_count=10),
- finish_reason=FinishReason.STOP,
+ finish_reason="stop",
)
client = use_instrumentation(ClientWithFinishReason)()
- messages = [ChatMessage(role=Role.USER, text="Test")]
+ messages = [ChatMessage("user", ["Test"])]
span_exporter.clear()
response = await client.get_response(messages=messages, model_id="Test")
assert response is not None
- assert response.finish_reason == FinishReason.STOP
+ assert response.finish_reason == "stop"
spans = span_exporter.get_finished_spans()
assert len(spans) == 1
span = spans[0]
@@ -1869,7 +1860,6 @@ async def _inner_get_response(self, *, messages, options, **kwargs):
@pytest.mark.parametrize("enable_sensitive_data", [True], indirect=True)
async def test_agent_streaming_exception(span_exporter: InMemorySpanExporter, enable_sensitive_data):
"""Test agent streaming captures exceptions."""
- from agent_framework import AgentResponseUpdate
from agent_framework.observability import use_agent_instrumentation
class FailingStreamingAgent(AgentProtocol):
@@ -1898,10 +1888,10 @@ def default_options(self):
return self._default_options
async def run(self, messages=None, *, thread=None, **kwargs):
- return AgentResponse(messages=[], thread=thread)
+ return AgentResponse(messages=[])
async def run_stream(self, messages=None, *, thread=None, **kwargs):
- yield AgentResponseUpdate(text="Starting", role=Role.ASSISTANT)
+ yield AgentResponseUpdate(contents=[Content.from_text(text="Starting")], role="assistant")
raise RuntimeError("Stream failed")
decorated_agent = use_agent_instrumentation(FailingStreamingAgent)
@@ -1924,7 +1914,7 @@ async def run_stream(self, messages=None, *, thread=None, **kwargs):
async def test_chat_client_when_disabled(mock_chat_client, span_exporter: InMemorySpanExporter):
"""Test that no spans are created when instrumentation is disabled."""
client = use_instrumentation(mock_chat_client)()
- messages = [ChatMessage(role=Role.USER, text="Test")]
+ messages = [ChatMessage("user", ["Test"])]
span_exporter.clear()
response = await client.get_response(messages=messages, model_id="Test")
@@ -1939,7 +1929,7 @@ async def test_chat_client_when_disabled(mock_chat_client, span_exporter: InMemo
async def test_chat_client_streaming_when_disabled(mock_chat_client, span_exporter: InMemorySpanExporter):
"""Test streaming creates no spans when instrumentation is disabled."""
client = use_instrumentation(mock_chat_client)()
- messages = [ChatMessage(role=Role.USER, text="Test")]
+ messages = [ChatMessage("user", ["Test"])]
span_exporter.clear()
updates = []
@@ -1982,12 +1972,11 @@ def default_options(self):
return self._default_options
async def run(self, messages=None, *, thread=None, **kwargs):
- return AgentResponse(messages=[], thread=thread)
+ return AgentResponse(messages=[])
async def run_stream(self, messages=None, *, thread=None, **kwargs):
- from agent_framework import AgentResponseUpdate
- yield AgentResponseUpdate(text="test", role=Role.ASSISTANT)
+ yield AgentResponseUpdate(contents=[Content.from_text(text="test")], role="assistant")
decorated = use_agent_instrumentation(TestAgent)
agent = decorated()
@@ -2002,7 +1991,6 @@ async def run_stream(self, messages=None, *, thread=None, **kwargs):
@pytest.mark.parametrize("enable_instrumentation", [False], indirect=True)
async def test_agent_streaming_when_disabled(span_exporter: InMemorySpanExporter):
"""Test agent streaming creates no spans when disabled."""
- from agent_framework import AgentResponseUpdate
from agent_framework.observability import use_agent_instrumentation
class TestAgent(AgentProtocol):
@@ -2031,10 +2019,10 @@ def default_options(self):
return self._default_options
async def run(self, messages=None, *, thread=None, **kwargs):
- return AgentResponse(messages=[], thread=thread)
+ return AgentResponse(messages=[])
async def run_stream(self, messages=None, *, thread=None, **kwargs):
- yield AgentResponseUpdate(text="test", role=Role.ASSISTANT)
+ yield AgentResponseUpdate(contents=[Content.from_text(text="test")], role="assistant")
decorated = use_agent_instrumentation(TestAgent)
agent = decorated()
diff --git a/python/packages/core/tests/core/test_threads.py b/python/packages/core/tests/core/test_threads.py
index 01d5ceb98f..241cbf4a90 100644
--- a/python/packages/core/tests/core/test_threads.py
+++ b/python/packages/core/tests/core/test_threads.py
@@ -5,7 +5,7 @@
import pytest
-from agent_framework import AgentThread, ChatMessage, ChatMessageStore, Role
+from agent_framework import AgentThread, ChatMessage, ChatMessageStore
from agent_framework._threads import AgentThreadState, ChatMessageStoreState
from agent_framework.exceptions import AgentThreadException
@@ -44,16 +44,16 @@ async def deserialize(cls, serialized_store_state: Any, **kwargs: Any) -> "MockC
def sample_messages() -> list[ChatMessage]:
"""Fixture providing sample chat messages for testing."""
return [
- ChatMessage(role=Role.USER, text="Hello", message_id="msg1"),
- ChatMessage(role=Role.ASSISTANT, text="Hi there!", message_id="msg2"),
- ChatMessage(role=Role.USER, text="How are you?", message_id="msg3"),
+ ChatMessage("user", ["Hello"], message_id="msg1"),
+ ChatMessage("assistant", ["Hi there!"], message_id="msg2"),
+ ChatMessage("user", ["How are you?"], message_id="msg3"),
]
@pytest.fixture
def sample_message() -> ChatMessage:
"""Fixture providing a single sample chat message for testing."""
- return ChatMessage(role=Role.USER, text="Test message", message_id="test1")
+ return ChatMessage("user", ["Test message"], message_id="test1")
class TestAgentThread:
@@ -178,7 +178,7 @@ async def test_on_new_messages_multiple_messages(self, sample_messages: list[Cha
async def test_on_new_messages_with_existing_store(self, sample_message: ChatMessage) -> None:
"""Test _on_new_messages adds to existing message store."""
- initial_messages = [ChatMessage(role=Role.USER, text="Initial", message_id="init1")]
+ initial_messages = [ChatMessage("user", ["Initial"], message_id="init1")]
store = ChatMessageStore(initial_messages)
thread = AgentThread(message_store=store)
@@ -226,7 +226,7 @@ async def test_deserialize_with_existing_store(self) -> None:
thread = AgentThread(message_store=store)
serialized_data: dict[str, Any] = {
"service_thread_id": None,
- "chat_message_store_state": {"messages": [ChatMessage(role="user", text="test")]},
+ "chat_message_store_state": {"messages": [ChatMessage("user", ["test"])]},
}
await thread.update_from_thread_state(serialized_data)
@@ -449,7 +449,7 @@ def test_init_with_chat_message_store_state_no_messages(self) -> None:
def test_init_with_chat_message_store_state_object(self) -> None:
"""Test AgentThreadState initialization with ChatMessageStoreState object."""
- store_state = ChatMessageStoreState(messages=[ChatMessage(role=Role.USER, text="test")])
+ store_state = ChatMessageStoreState(messages=[ChatMessage("user", ["test"])])
state = AgentThreadState(chat_message_store_state=store_state)
assert state.service_thread_id is None
diff --git a/python/packages/core/tests/core/test_tools.py b/python/packages/core/tests/core/test_tools.py
index a60018c7a4..9187c9f0f3 100644
--- a/python/packages/core/tests/core/test_tools.py
+++ b/python/packages/core/tests/core/test_tools.py
@@ -959,7 +959,7 @@ async def get_response(self, messages, **kwargs):
return response
# Default response
return ChatResponse(
- messages=[ChatMessage(role="assistant", contents=["Default response"])],
+ messages=[ChatMessage("assistant", ["Default response"])],
)
async def get_streaming_response(self, messages, **kwargs):
@@ -973,7 +973,7 @@ async def get_streaming_response(self, messages, **kwargs):
yield ChatResponseUpdate(contents=[content], role=msg.role)
else:
# Default response
- yield ChatResponseUpdate(text="Default response", role="assistant")
+ yield ChatResponseUpdate(contents=[Content.from_text(text="Default response")], role="assistant")
return MockChatClient()
@@ -1015,7 +1015,7 @@ async def test_non_streaming_single_function_no_approval():
)
]
)
- final_response = ChatResponse(messages=[ChatMessage(role="assistant", text="The result is 10")])
+ final_response = ChatResponse(messages=[ChatMessage("assistant", ["The result is 10"])])
call_count = [0]
responses = [initial_response, final_response]
@@ -1100,7 +1100,7 @@ async def test_non_streaming_two_functions_both_no_approval():
)
]
)
- final_response = ChatResponse(messages=[ChatMessage(role="assistant", text="Both tools executed successfully")])
+ final_response = ChatResponse(messages=[ChatMessage("assistant", ["Both tools executed successfully"])])
call_count = [0]
responses = [initial_response, final_response]
@@ -1227,7 +1227,7 @@ async def test_streaming_single_function_no_approval():
role="assistant",
)
]
- final_updates = [ChatResponseUpdate(text="The result is 10", role="assistant")]
+ final_updates = [ChatResponseUpdate(contents=[Content.from_text(text="The result is 10")], role="assistant")]
call_count = [0]
updates_list = [initial_updates, final_updates]
@@ -1246,13 +1246,12 @@ async def mock_get_streaming_response(self, messages, **kwargs):
updates.append(update)
# Verify: should have function call update, tool result update (injected), and final update
- from agent_framework import Role
assert len(updates) >= 3
# First update is the function call
assert updates[0].contents[0].type == "function_call"
# Second update should be the tool result (injected by the wrapper)
- assert updates[1].role == Role.TOOL
+ assert updates[1].role == "tool"
assert updates[1].contents[0].type == "function_result"
assert updates[1].contents[0].result == 10 # 5 * 2
# Last update is the final message
@@ -1294,11 +1293,10 @@ async def mock_get_streaming_response(self, messages, **kwargs):
updates.append(update)
# Verify: should yield function call and then approval request
- from agent_framework import Role
assert len(updates) == 2
assert updates[0].contents[0].type == "function_call"
- assert updates[1].role == Role.ASSISTANT
+ assert updates[1].role == "assistant"
assert updates[1].contents[0].type == "function_approval_request"
@@ -1319,7 +1317,9 @@ async def test_streaming_two_functions_both_no_approval():
role="assistant",
),
]
- final_updates = [ChatResponseUpdate(text="Both tools executed successfully", role="assistant")]
+ final_updates = [
+ ChatResponseUpdate(contents=[Content.from_text(text="Both tools executed successfully")], role="assistant")
+ ]
call_count = [0]
updates_list = [initial_updates, final_updates]
@@ -1338,7 +1338,6 @@ async def mock_get_streaming_response(self, messages, **kwargs):
updates.append(update)
# Verify: should have both function calls, one tool result update with both results, and final message
- from agent_framework import Role
assert len(updates) >= 2
# First update has both function calls
@@ -1346,7 +1345,7 @@ async def mock_get_streaming_response(self, messages, **kwargs):
assert updates[0].contents[0].type == "function_call"
assert updates[0].contents[1].type == "function_call"
# Should have a tool result update with both results
- tool_updates = [u for u in updates if u.role == Role.TOOL]
+ tool_updates = [u for u in updates if u.role == "tool"]
assert len(tool_updates) == 1
assert len(tool_updates[0].contents) == 2
assert all(c.type == "function_result" for c in tool_updates[0].contents)
@@ -1392,13 +1391,12 @@ async def mock_get_streaming_response(self, messages, **kwargs):
updates.append(update)
# Verify: should yield both function calls and then approval requests
- from agent_framework import Role
assert len(updates) == 3
assert updates[0].contents[0].type == "function_call"
assert updates[1].contents[0].type == "function_call"
# Assistant update with both approval requests
- assert updates[2].role == Role.ASSISTANT
+ assert updates[2].role == "assistant"
assert len(updates[2].contents) == 2
assert all(c.type == "function_approval_request" for c in updates[2].contents)
@@ -1443,13 +1441,12 @@ async def mock_get_streaming_response(self, messages, **kwargs):
updates.append(update)
# Verify: should yield both function calls and then approval requests (when one needs approval, all wait)
- from agent_framework import Role
assert len(updates) == 3
assert updates[0].contents[0].type == "function_call"
assert updates[1].contents[0].type == "function_call"
# Assistant update with both approval requests
- assert updates[2].role == Role.ASSISTANT
+ assert updates[2].role == "assistant"
assert len(updates[2].contents) == 2
assert all(c.type == "function_approval_request" for c in updates[2].contents)
diff --git a/python/packages/core/tests/core/test_types.py b/python/packages/core/tests/core/test_types.py
index 8236d75d20..3e7e435077 100644
--- a/python/packages/core/tests/core/test_types.py
+++ b/python/packages/core/tests/core/test_types.py
@@ -19,8 +19,6 @@
ChatResponse,
ChatResponseUpdate,
Content,
- FinishReason,
- Role,
TextSpanRegion,
ToolMode,
ToolProtocol,
@@ -36,6 +34,8 @@
_parse_content_list,
_validate_uri,
add_usage_details,
+ normalize_messages,
+ prepare_messages,
validate_tool_mode,
)
from agent_framework.exceptions import ContentError
@@ -573,10 +573,10 @@ def test_ai_content_serialization(args: dict):
def test_chat_message_text():
"""Test the ChatMessage class to ensure it initializes correctly with text content."""
# Create a ChatMessage with a role and text content
- message = ChatMessage(role="user", text="Hello, how are you?")
+ message = ChatMessage("user", ["Hello, how are you?"])
# Check the type and content
- assert message.role == Role.USER
+ assert message.role == "user"
assert len(message.contents) == 1
assert message.contents[0].type == "text"
assert message.contents[0].text == "Hello, how are you?"
@@ -591,10 +591,10 @@ def test_chat_message_contents():
# Create a ChatMessage with a role and multiple contents
content1 = Content.from_text("Hello, how are you?")
content2 = Content.from_text("I'm fine, thank you!")
- message = ChatMessage(role="user", contents=[content1, content2])
+ message = ChatMessage("user", [content1, content2])
# Check the type and content
- assert message.role == Role.USER
+ assert message.role == "user"
assert len(message.contents) == 2
assert message.contents[0].type == "text"
assert message.contents[1].type == "text"
@@ -604,8 +604,8 @@ def test_chat_message_contents():
def test_chat_message_with_chatrole_instance():
- m = ChatMessage(role=Role.USER, text="hi")
- assert m.role == Role.USER
+ m = ChatMessage("user", ["hi"])
+ assert m.role == "user"
assert m.text == "hi"
@@ -615,13 +615,13 @@ def test_chat_message_with_chatrole_instance():
def test_chat_response():
"""Test the ChatResponse class to ensure it initializes correctly with a message."""
# Create a ChatMessage
- message = ChatMessage(role="assistant", text="I'm doing well, thank you!")
+ message = ChatMessage("assistant", ["I'm doing well, thank you!"])
# Create a ChatResponse with the message
response = ChatResponse(messages=message)
# Check the type and content
- assert response.messages[0].role == Role.ASSISTANT
+ assert response.messages[0].role == "assistant"
assert response.messages[0].text == "I'm doing well, thank you!"
assert isinstance(response.messages[0], ChatMessage)
# __str__ returns text
@@ -635,32 +635,30 @@ class OutputModel(BaseModel):
def test_chat_response_with_format():
"""Test the ChatResponse class to ensure it initializes correctly with a message."""
# Create a ChatMessage
- message = ChatMessage(role="assistant", text='{"response": "Hello"}')
+ message = ChatMessage("assistant", ['{"response": "Hello"}'])
# Create a ChatResponse with the message
response = ChatResponse(messages=message)
# Check the type and content
- assert response.messages[0].role == Role.ASSISTANT
+ assert response.messages[0].role == "assistant"
assert response.messages[0].text == '{"response": "Hello"}'
assert isinstance(response.messages[0], ChatMessage)
assert response.text == '{"response": "Hello"}'
+ # Since no response_format was provided, value is None and accessing it returns None
assert response.value is None
- response.try_parse_value(OutputModel)
- assert response.value is not None
- assert response.value.response == "Hello"
def test_chat_response_with_format_init():
"""Test the ChatResponse class to ensure it initializes correctly with a message."""
# Create a ChatMessage
- message = ChatMessage(role="assistant", text='{"response": "Hello"}')
+ message = ChatMessage("assistant", ['{"response": "Hello"}'])
# Create a ChatResponse with the message
response = ChatResponse(messages=message, response_format=OutputModel)
# Check the type and content
- assert response.messages[0].role == Role.ASSISTANT
+ assert response.messages[0].role == "assistant"
assert response.messages[0].text == '{"response": "Hello"}'
assert isinstance(response.messages[0], ChatMessage)
assert response.text == '{"response": "Hello"}'
@@ -676,7 +674,7 @@ class StrictSchema(BaseModel):
name: str = Field(min_length=10)
score: int = Field(gt=0, le=100)
- message = ChatMessage(role="assistant", text='{"id": 1, "name": "test", "score": -5}')
+ message = ChatMessage("assistant", ['{"id": 1, "name": "test", "score": -5}'])
response = ChatResponse(messages=message, response_format=StrictSchema)
with raises(ValidationError) as exc_info:
@@ -689,32 +687,17 @@ class StrictSchema(BaseModel):
assert "score" in error_fields, "Expected 'score' gt constraint error"
-def test_chat_response_try_parse_value_returns_none_on_invalid():
- """Test that try_parse_value returns None on validation failure with Field constraints."""
-
- class StrictSchema(BaseModel):
- id: Literal[5]
- name: str = Field(min_length=10)
- score: int = Field(gt=0, le=100)
-
- message = ChatMessage(role="assistant", text='{"id": 1, "name": "test", "score": -5}')
- response = ChatResponse(messages=message)
-
- result = response.try_parse_value(StrictSchema)
- assert result is None
-
-
-def test_chat_response_try_parse_value_returns_value_on_success():
- """Test that try_parse_value returns parsed value when all constraints pass."""
+def test_chat_response_value_with_valid_schema():
+ """Test that value property returns parsed value when all constraints pass."""
class MySchema(BaseModel):
name: str = Field(min_length=3)
score: int = Field(ge=0, le=100)
- message = ChatMessage(role="assistant", text='{"name": "test", "score": 85}')
- response = ChatResponse(messages=message)
+ message = ChatMessage("assistant", ['{"name": "test", "score": 85}'])
+ response = ChatResponse(messages=message, response_format=MySchema)
- result = response.try_parse_value(MySchema)
+ result = response.value
assert result is not None
assert result.name == "test"
assert result.score == 85
@@ -728,7 +711,7 @@ class StrictSchema(BaseModel):
name: str = Field(min_length=10)
score: int = Field(gt=0, le=100)
- message = ChatMessage(role="assistant", text='{"id": 1, "name": "test", "score": -5}')
+ message = ChatMessage("assistant", ['{"id": 1, "name": "test", "score": -5}'])
response = AgentResponse(messages=message, response_format=StrictSchema)
with raises(ValidationError) as exc_info:
@@ -741,32 +724,17 @@ class StrictSchema(BaseModel):
assert "score" in error_fields, "Expected 'score' gt constraint error"
-def test_agent_response_try_parse_value_returns_none_on_invalid():
- """Test that AgentResponse.try_parse_value returns None on Field constraint failure."""
-
- class StrictSchema(BaseModel):
- id: Literal[5]
- name: str = Field(min_length=10)
- score: int = Field(gt=0, le=100)
-
- message = ChatMessage(role="assistant", text='{"id": 1, "name": "test", "score": -5}')
- response = AgentResponse(messages=message)
-
- result = response.try_parse_value(StrictSchema)
- assert result is None
-
-
-def test_agent_response_try_parse_value_returns_value_on_success():
- """Test that AgentResponse.try_parse_value returns parsed value when all constraints pass."""
+def test_agent_response_value_with_valid_schema():
+ """Test that AgentResponse.value property returns parsed value when all constraints pass."""
class MySchema(BaseModel):
name: str = Field(min_length=3)
score: int = Field(ge=0, le=100)
- message = ChatMessage(role="assistant", text='{"name": "test", "score": 85}')
- response = AgentResponse(messages=message)
+ message = ChatMessage("assistant", ['{"name": "test", "score": 85}'])
+ response = AgentResponse(messages=message, response_format=MySchema)
- result = response.try_parse_value(MySchema)
+ result = response.value
assert result is not None
assert result.name == "test"
assert result.score == 85
@@ -797,12 +765,12 @@ def test_chat_response_updates_to_chat_response_one():
# Create a ChatResponseUpdate with the message
response_updates = [
- ChatResponseUpdate(text=message1, message_id="1"),
- ChatResponseUpdate(text=message2, message_id="1"),
+ ChatResponseUpdate(contents=[message1], message_id="1"),
+ ChatResponseUpdate(contents=[message2], message_id="1"),
]
# Convert to ChatResponse
- chat_response = ChatResponse.from_chat_response_updates(response_updates)
+ chat_response = ChatResponse.from_updates(response_updates)
# Check the type and content
assert len(chat_response.messages) == 1
@@ -820,12 +788,12 @@ def test_chat_response_updates_to_chat_response_two():
# Create a ChatResponseUpdate with the message
response_updates = [
- ChatResponseUpdate(text=message1, message_id="1"),
- ChatResponseUpdate(text=message2, message_id="2"),
+ ChatResponseUpdate(contents=[message1], message_id="1"),
+ ChatResponseUpdate(contents=[message2], message_id="2"),
]
# Convert to ChatResponse
- chat_response = ChatResponse.from_chat_response_updates(response_updates)
+ chat_response = ChatResponse.from_updates(response_updates)
# Check the type and content
assert len(chat_response.messages) == 2
@@ -844,13 +812,13 @@ def test_chat_response_updates_to_chat_response_multiple():
# Create a ChatResponseUpdate with the message
response_updates = [
- ChatResponseUpdate(text=message1, message_id="1"),
+ ChatResponseUpdate(contents=[message1], message_id="1"),
ChatResponseUpdate(contents=[Content.from_text_reasoning(text="Additional context")], message_id="1"),
- ChatResponseUpdate(text=message2, message_id="1"),
+ ChatResponseUpdate(contents=[message2], message_id="1"),
]
# Convert to ChatResponse
- chat_response = ChatResponse.from_chat_response_updates(response_updates)
+ chat_response = ChatResponse.from_updates(response_updates)
# Check the type and content
assert len(chat_response.messages) == 1
@@ -868,15 +836,15 @@ def test_chat_response_updates_to_chat_response_multiple_multiple():
# Create a ChatResponseUpdate with the message
response_updates = [
- ChatResponseUpdate(text=message1, message_id="1"),
- ChatResponseUpdate(text=message2, message_id="1"),
+ ChatResponseUpdate(contents=[message1], message_id="1"),
+ ChatResponseUpdate(contents=[message2], message_id="1"),
ChatResponseUpdate(contents=[Content.from_text_reasoning(text="Additional context")], message_id="1"),
ChatResponseUpdate(contents=[Content.from_text(text="More context")], message_id="1"),
- ChatResponseUpdate(text="Final part", message_id="1"),
+ ChatResponseUpdate(contents=[Content.from_text(text="Final part")], message_id="1"),
]
# Convert to ChatResponse
- chat_response = ChatResponse.from_chat_response_updates(response_updates)
+ chat_response = ChatResponse.from_updates(response_updates)
# Check the type and content
assert len(chat_response.messages) == 1
@@ -897,32 +865,30 @@ def test_chat_response_updates_to_chat_response_multiple_multiple():
async def test_chat_response_from_async_generator():
async def gen() -> AsyncIterable[ChatResponseUpdate]:
- yield ChatResponseUpdate(text="Hello", message_id="1")
- yield ChatResponseUpdate(text=" world", message_id="1")
+ yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")], message_id="1")
+ yield ChatResponseUpdate(contents=[Content.from_text(text=" world")], message_id="1")
- resp = await ChatResponse.from_chat_response_generator(gen())
+ resp = await ChatResponse.from_update_generator(gen())
assert resp.text == "Hello world"
async def test_chat_response_from_async_generator_output_format():
async def gen() -> AsyncIterable[ChatResponseUpdate]:
- yield ChatResponseUpdate(text='{ "respon', message_id="1")
- yield ChatResponseUpdate(text='se": "Hello" }', message_id="1")
+ yield ChatResponseUpdate(contents=[Content.from_text(text='{ "respon')], message_id="1")
+ yield ChatResponseUpdate(contents=[Content.from_text(text='se": "Hello" }')], message_id="1")
- resp = await ChatResponse.from_chat_response_generator(gen())
+ # Note: without an output_format_type, resp.value stays None because the JSON text is never parsed
+ resp = await ChatResponse.from_update_generator(gen())
assert resp.text == '{ "response": "Hello" }'
assert resp.value is None
- resp.try_parse_value(OutputModel)
- assert resp.value is not None
- assert resp.value.response == "Hello"
async def test_chat_response_from_async_generator_output_format_in_method():
async def gen() -> AsyncIterable[ChatResponseUpdate]:
- yield ChatResponseUpdate(text='{ "respon', message_id="1")
- yield ChatResponseUpdate(text='se": "Hello" }', message_id="1")
+ yield ChatResponseUpdate(contents=[Content.from_text(text='{ "respon')], message_id="1")
+ yield ChatResponseUpdate(contents=[Content.from_text(text='se": "Hello" }')], message_id="1")
- resp = await ChatResponse.from_chat_response_generator(gen(), output_format_type=OutputModel)
+ resp = await ChatResponse.from_update_generator(gen(), output_format_type=OutputModel)
assert resp.text == '{ "response": "Hello" }'
assert resp.value is not None
assert resp.value.response == "Hello"
@@ -1080,7 +1046,7 @@ def test_chat_options_and_tool_choice_required_specific_function() -> None:
@fixture
def chat_message() -> ChatMessage:
- return ChatMessage(role=Role.USER, text="Hello")
+ return ChatMessage("user", ["Hello"])
@fixture
@@ -1095,7 +1061,7 @@ def agent_response(chat_message: ChatMessage) -> AgentResponse:
@fixture
def agent_response_update(text_content: Content) -> AgentResponseUpdate:
- return AgentResponseUpdate(role=Role.ASSISTANT, contents=[text_content])
+ return AgentResponseUpdate(role="assistant", contents=[text_content])
# region AgentResponse
@@ -1129,7 +1095,7 @@ def test_agent_run_response_text_property_empty() -> None:
def test_agent_run_response_from_updates(agent_response_update: AgentResponseUpdate) -> None:
updates = [agent_response_update, agent_response_update]
- response = AgentResponse.from_agent_run_response_updates(updates)
+ response = AgentResponse.from_updates(updates)
assert len(response.messages) > 0
assert response.text == "Test contentTest content"
@@ -1174,7 +1140,7 @@ def test_agent_run_response_update_created_at() -> None:
utc_timestamp = "2024-12-01T00:31:30.000000Z"
update = AgentResponseUpdate(
contents=[Content.from_text(text="test")],
- role=Role.ASSISTANT,
+ role="assistant",
created_at=utc_timestamp,
)
assert update.created_at == utc_timestamp
@@ -1185,7 +1151,7 @@ def test_agent_run_response_update_created_at() -> None:
formatted_utc = now_utc.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
update_with_now = AgentResponseUpdate(
contents=[Content.from_text(text="test")],
- role=Role.ASSISTANT,
+ role="assistant",
created_at=formatted_utc,
)
assert update_with_now.created_at == formatted_utc
@@ -1197,7 +1163,7 @@ def test_agent_run_response_created_at() -> None:
# Test with a properly formatted UTC timestamp
utc_timestamp = "2024-12-01T00:31:30.000000Z"
response = AgentResponse(
- messages=[ChatMessage(role=Role.ASSISTANT, text="Hello")],
+ messages=[ChatMessage("assistant", ["Hello"])],
created_at=utc_timestamp,
)
assert response.created_at == utc_timestamp
@@ -1207,7 +1173,7 @@ def test_agent_run_response_created_at() -> None:
now_utc = datetime.now(tz=timezone.utc)
formatted_utc = now_utc.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
response_with_now = AgentResponse(
- messages=[ChatMessage(role=Role.ASSISTANT, text="Hello")],
+ messages=[ChatMessage("assistant", ["Hello"])],
created_at=formatted_utc,
)
assert response_with_now.created_at == formatted_utc
@@ -1271,7 +1237,7 @@ def test_function_call_merge_in_process_update_and_usage_aggregation():
# plus usage
u3 = ChatResponseUpdate(contents=[Content.from_usage(UsageDetails(input_token_count=1, output_token_count=2))])
- resp = ChatResponse.from_chat_response_updates([u1, u2, u3])
+ resp = ChatResponse.from_updates([u1, u2, u3])
assert len(resp.messages) == 1
last_contents = resp.messages[0].contents
assert any(c.type == "function_call" for c in last_contents)
@@ -1287,7 +1253,7 @@ def test_function_call_incompatible_ids_are_not_merged():
u1 = ChatResponseUpdate(contents=[Content.from_function_call(call_id="a", name="f", arguments="x")], message_id="m")
u2 = ChatResponseUpdate(contents=[Content.from_function_call(call_id="b", name="f", arguments="y")], message_id="m")
- resp = ChatResponse.from_chat_response_updates([u1, u2])
+ resp = ChatResponse.from_updates([u1, u2])
fcs = [c for c in resp.messages[0].contents if c.type == "function_call"]
assert len(fcs) == 2
@@ -1295,18 +1261,23 @@ def test_function_call_incompatible_ids_are_not_merged():
# region Role & FinishReason basics
-def test_chat_role_str_and_repr():
- assert str(Role.USER) == "user"
- assert "Role(value=" in repr(Role.USER)
+def test_chat_role_is_string():
+ """Role is now a NewType of str, so roles are just strings."""
+ role = "user"
+ assert role == "user"
+ assert isinstance(role, str)
-def test_chat_finish_reason_constants():
- assert FinishReason.STOP.value == "stop"
+def test_chat_finish_reason_is_string():
+ """FinishReason is now a NewType of str, so finish reasons are just strings."""
+ finish_reason = "stop"
+ assert finish_reason == "stop"
+ assert isinstance(finish_reason, str)
def test_response_update_propagates_fields_and_metadata():
upd = ChatResponseUpdate(
- text="hello",
+ contents=[Content.from_text(text="hello")],
role="assistant",
author_name="bot",
response_id="rid",
@@ -1314,17 +1285,17 @@ def test_response_update_propagates_fields_and_metadata():
conversation_id="cid",
model_id="model-x",
created_at="t0",
- finish_reason=FinishReason.STOP,
+ finish_reason="stop",
additional_properties={"k": "v"},
)
- resp = ChatResponse.from_chat_response_updates([upd])
+ resp = ChatResponse.from_updates([upd])
assert resp.response_id == "rid"
assert resp.created_at == "t0"
assert resp.conversation_id == "cid"
assert resp.model_id == "model-x"
- assert resp.finish_reason == FinishReason.STOP
+ assert resp.finish_reason == "stop"
assert resp.additional_properties and resp.additional_properties["k"] == "v"
- assert resp.messages[0].role == Role.ASSISTANT
+ assert resp.messages[0].role == "assistant"
assert resp.messages[0].author_name == "bot"
assert resp.messages[0].message_id == "mid"
@@ -1332,9 +1303,9 @@ def test_response_update_propagates_fields_and_metadata():
def test_text_coalescing_preserves_first_properties():
t1 = Content.from_text("A", raw_representation={"r": 1}, additional_properties={"p": 1})
t2 = Content.from_text("B")
- upd1 = ChatResponseUpdate(text=t1, message_id="x")
- upd2 = ChatResponseUpdate(text=t2, message_id="x")
- resp = ChatResponse.from_chat_response_updates([upd1, upd2])
+ upd1 = ChatResponseUpdate(contents=[t1], message_id="x")
+ upd2 = ChatResponseUpdate(contents=[t2], message_id="x")
+ resp = ChatResponse.from_updates([upd1, upd2])
# After coalescing there should be a single TextContent with merged text and preserved props from first
items = [c for c in resp.messages[0].contents if c.type == "text"]
assert len(items) >= 1
@@ -1359,7 +1330,7 @@ def test_chat_tool_mode_eq_with_string():
@fixture
def agent_run_response_async() -> AgentResponse:
- return AgentResponse(messages=[ChatMessage(role="user", text="Hello")])
+ return AgentResponse(messages=[ChatMessage("user", ["Hello"])])
async def test_agent_run_response_from_async_generator():
@@ -1587,7 +1558,7 @@ def test_chat_message_complex_content_serialization():
Content.from_function_result(call_id="call1", result="success"),
]
- message = ChatMessage(role=Role.ASSISTANT, contents=contents)
+ message = ChatMessage("assistant", contents)
# Test to_dict
message_dict = message.to_dict()
@@ -1663,7 +1634,7 @@ def test_chat_response_complex_serialization():
{"role": "user", "contents": [{"type": "text", "text": "Hello"}]},
{"role": "assistant", "contents": [{"type": "text", "text": "Hi there"}]},
],
- "finish_reason": {"value": "stop"},
+ "finish_reason": "stop",
"usage_details": {
"type": "usage_details",
"input_token_count": 5,
@@ -1676,7 +1647,7 @@ def test_chat_response_complex_serialization():
response = ChatResponse.from_dict(response_data)
assert len(response.messages) == 2
assert isinstance(response.messages[0], ChatMessage)
- assert isinstance(response.finish_reason, FinishReason)
+ assert isinstance(response.finish_reason, str)
assert isinstance(response.usage_details, dict)
assert response.model_id == "gpt-4" # Should be stored as model_id
@@ -1684,7 +1655,7 @@ def test_chat_response_complex_serialization():
response_dict = response.to_dict()
assert len(response_dict["messages"]) == 2
assert isinstance(response_dict["messages"][0], dict)
- assert isinstance(response_dict["finish_reason"], dict)
+ assert isinstance(response_dict["finish_reason"], str)
assert isinstance(response_dict["usage_details"], dict)
assert response_dict["model_id"] == "gpt-4" # Should serialize as model_id
@@ -1794,20 +1765,20 @@ def test_agent_run_response_update_all_content_types():
update = AgentResponseUpdate.from_dict(update_data)
assert len(update.contents) == 12 # unknown_type is logged and ignored
- assert isinstance(update.role, Role)
- assert update.role.value == "assistant"
+ assert isinstance(update.role, str)
+ assert update.role == "assistant"
# Test to_dict with role conversion
update_dict = update.to_dict()
assert len(update_dict["contents"]) == 12 # unknown_type was ignored during from_dict
- assert isinstance(update_dict["role"], dict)
+ assert isinstance(update_dict["role"], str)
# Test role as string conversion
update_data_str_role = update_data.copy()
update_data_str_role["role"] = "user"
update_str = AgentResponseUpdate.from_dict(update_data_str_role)
- assert isinstance(update_str.role, Role)
- assert update_str.role.value == "user"
+ assert isinstance(update_str.role, str)
+ assert update_str.role == "user"
# region Serialization
@@ -1936,7 +1907,7 @@ def test_agent_run_response_update_all_content_types():
pytest.param(
ChatMessage,
{
- "role": {"type": "role", "value": "user"},
+ "role": "user",
"contents": [
{"type": "text", "text": "Hello"},
{"type": "function_call", "call_id": "call-1", "name": "test_func", "arguments": {}},
@@ -1953,16 +1924,16 @@ def test_agent_run_response_update_all_content_types():
"messages": [
{
"type": "chat_message",
- "role": {"type": "role", "value": "user"},
+ "role": "user",
"contents": [{"type": "text", "text": "Hello"}],
},
{
"type": "chat_message",
- "role": {"type": "role", "value": "assistant"},
+ "role": "assistant",
"contents": [{"type": "text", "text": "Hi there"}],
},
],
- "finish_reason": {"type": "finish_reason", "value": "stop"},
+ "finish_reason": "stop",
"usage_details": {
"type": "usage_details",
"input_token_count": 10,
@@ -1981,8 +1952,8 @@ def test_agent_run_response_update_all_content_types():
{"type": "text", "text": "Hello"},
{"type": "function_call", "call_id": "call-1", "name": "test_func", "arguments": {}},
],
- "role": {"type": "role", "value": "assistant"},
- "finish_reason": {"type": "finish_reason", "value": "stop"},
+ "role": "assistant",
+ "finish_reason": "stop",
"message_id": "msg-123",
"response_id": "resp-123",
},
@@ -1993,11 +1964,11 @@ def test_agent_run_response_update_all_content_types():
{
"messages": [
{
- "role": {"type": "role", "value": "user"},
+ "role": "user",
"contents": [{"type": "text", "text": "Question"}],
},
{
- "role": {"type": "role", "value": "assistant"},
+ "role": "assistant",
"contents": [{"type": "text", "text": "Answer"}],
},
],
@@ -2018,7 +1989,7 @@ def test_agent_run_response_update_all_content_types():
{"type": "text", "text": "Streaming"},
{"type": "function_call", "call_id": "call-1", "name": "test_func", "arguments": {}},
],
- "role": {"type": "role", "value": "assistant"},
+ "role": "assistant",
"message_id": "msg-123",
"response_id": "run-123",
"author_name": "Agent",
@@ -2519,3 +2490,1046 @@ def test_validate_uri_data_uri():
# endregion
+
+
+# region Test normalize_messages and prepare_messages with Content
+
+
+def test_normalize_messages_with_string():
+ """Test normalize_messages converts a string to a user message."""
+ result = normalize_messages("hello")
+ assert len(result) == 1
+ assert result[0].role == "user"
+ assert result[0].text == "hello"
+
+
+def test_normalize_messages_with_content():
+ """Test normalize_messages converts a Content object to a user message."""
+ content = Content.from_text("hello")
+ result = normalize_messages(content)
+ assert len(result) == 1
+ assert result[0].role == "user"
+ assert len(result[0].contents) == 1
+ assert result[0].contents[0].text == "hello"
+
+
+def test_normalize_messages_with_sequence_including_content():
+ """Test normalize_messages handles a sequence with Content objects."""
+ content = Content.from_text("image caption")
+ msg = ChatMessage("assistant", ["response"])
+ result = normalize_messages(["query", content, msg])
+ assert len(result) == 3
+ assert result[0].role == "user"
+ assert result[0].text == "query"
+ assert result[1].role == "user"
+ assert result[1].contents[0].text == "image caption"
+ assert result[2].role == "assistant"
+ assert result[2].text == "response"
+
+
+def test_prepare_messages_with_content():
+ """Test prepare_messages converts a Content object to a user message."""
+ content = Content.from_text("hello")
+ result = prepare_messages(content)
+ assert len(result) == 1
+ assert result[0].role == "user"
+ assert result[0].contents[0].text == "hello"
+
+
+def test_prepare_messages_with_content_and_system_instructions():
+ """Test prepare_messages handles Content with system instructions."""
+ content = Content.from_text("hello")
+ result = prepare_messages(content, system_instructions="Be helpful")
+ assert len(result) == 2
+ assert result[0].role == "system"
+ assert result[0].text == "Be helpful"
+ assert result[1].role == "user"
+ assert result[1].contents[0].text == "hello"
+
+
+def test_parse_content_list_with_strings():
+ """Test _parse_content_list converts strings to TextContent."""
+ result = _parse_content_list(["hello", "world"])
+ assert len(result) == 2
+ assert result[0].type == "text"
+ assert result[0].text == "hello"
+ assert result[1].type == "text"
+ assert result[1].text == "world"
+
+
+def test_parse_content_list_with_none_values():
+ """Test _parse_content_list skips None values."""
+ result = _parse_content_list(["hello", None, "world", None])
+ assert len(result) == 2
+ assert result[0].text == "hello"
+ assert result[1].text == "world"
+
+
+def test_parse_content_list_with_invalid_dict():
+ """Test _parse_content_list raises on invalid content dict missing type."""
+ # Invalid dict without type raises ValueError
+ with pytest.raises(ValueError, match="requires 'type'"):
+ _parse_content_list([{"invalid": "data"}])
+
+
+# region detect_media_type_from_base64 additional formats
+
+
+def test_detect_media_type_gif87a():
+ """Test detecting GIF87a format."""
+ gif_data = b"GIF87a" + b"fake_data"
+ assert detect_media_type_from_base64(data_bytes=gif_data) == "image/gif"
+
+
+def test_detect_media_type_bmp():
+ """Test detecting BMP format."""
+ bmp_data = b"BM" + b"fake_data"
+ assert detect_media_type_from_base64(data_bytes=bmp_data) == "image/bmp"
+
+
+def test_detect_media_type_svg():
+ """Test detecting SVG format."""
+ svg_data = b"