Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
41 commits
Select commit Hold shift + click to select a range
e4042f4
Python: Provider-leading client design & OpenAI package extraction
eavanvalkenburg Mar 20, 2026
b65fbd8
fix: missing Agent imports in samples, .model_id → .model in foundry_…
eavanvalkenburg Mar 20, 2026
f3d118b
fix: CI failures — mypy errors, coverage targets, sample imports
eavanvalkenburg Mar 20, 2026
ee26f9d
fix: populate openai .pyi stub, fix broken README links, coverage tar…
eavanvalkenburg Mar 20, 2026
6cae909
fixes
eavanvalkenburg Mar 20, 2026
86039db
updated observability
eavanvalkenburg Mar 20, 2026
d9a6e85
reset azure init.pyi
eavanvalkenburg Mar 20, 2026
0882ba5
fix errors
eavanvalkenburg Mar 20, 2026
225256d
updated adr number
eavanvalkenburg Mar 20, 2026
f178bd0
fix foundry local
eavanvalkenburg Mar 21, 2026
951e86c
fixed not renamed docstrings and comments, and added deprecated marke…
eavanvalkenburg Mar 23, 2026
c9e8666
fix tests and pyprojects
eavanvalkenburg Mar 23, 2026
8778eff
fix test vars
eavanvalkenburg Mar 23, 2026
88732c4
updated function tests
eavanvalkenburg Mar 23, 2026
8e433b6
update durable
eavanvalkenburg Mar 23, 2026
70bc17e
updated test setup for functions
eavanvalkenburg Mar 24, 2026
3770442
Fix Foundry auth in workflow samples
eavanvalkenburg Mar 24, 2026
fbbf887
Stabilize Python integration workflows
eavanvalkenburg Mar 24, 2026
dd4056c
Update hosting samples for Foundry
eavanvalkenburg Mar 24, 2026
826e84c
Trigger full CI rerun
eavanvalkenburg Mar 24, 2026
f41e7d3
Trigger CI rerun again
eavanvalkenburg Mar 24, 2026
ff87967
trigger rerun
eavanvalkenburg Mar 24, 2026
4607ae2
trigger rerun
eavanvalkenburg Mar 24, 2026
6e5f737
fix for litellm
eavanvalkenburg Mar 24, 2026
d80c394
undo durabletask changes
eavanvalkenburg Mar 24, 2026
4ce2197
Move Foundry APIs into foundry namespace
eavanvalkenburg Mar 24, 2026
c73527d
Fix Foundry pyproject formatting
eavanvalkenburg Mar 24, 2026
930c66c
Split provider samples by Foundry surface
eavanvalkenburg Mar 24, 2026
ca60e3e
Restore hosting sample requirements
eavanvalkenburg Mar 24, 2026
d6da41a
updated tests
eavanvalkenburg Mar 24, 2026
477e14e
updated foundry integration tests
eavanvalkenburg Mar 24, 2026
859d2a3
removed dist from azurefunctions tests
eavanvalkenburg Mar 24, 2026
a1f908b
Use separate Foundry clients for concurrent agents
eavanvalkenburg Mar 24, 2026
ff6abb5
fix client setup in azfunc and durable
eavanvalkenburg Mar 24, 2026
d14c450
disabled two tests
eavanvalkenburg Mar 24, 2026
f332654
updated setup for some function and durable tests
eavanvalkenburg Mar 25, 2026
9a09136
improved azure openai setup with new clients
eavanvalkenburg Mar 25, 2026
492a76e
ignore deprecated
eavanvalkenburg Mar 25, 2026
c770318
fixes
eavanvalkenburg Mar 25, 2026
1c88643
skip 11
eavanvalkenburg Mar 25, 2026
7bdfb1d
remove openai assistants int tests
eavanvalkenburg Mar 25, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
The table of contents is too big for display.
Diff view
Diff view
  •  
  •  
  •  
166 changes: 166 additions & 0 deletions .github/actions/setup-local-mcp-server/action.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,166 @@
# Composite action: start a local streamable HTTP MCP server and verify it
# answers both a health probe and a real MCP tool invocation before tests run.
# On any failure the action falls back to the caller-provided LOCAL_MCP_URL.
name: Setup Local MCP Server
description: Start and validate a local streamable HTTP MCP server for integration tests

inputs:
  fallback_url:
    description: Existing LOCAL_MCP_URL value to keep as a fallback if local startup fails
    required: false
    default: ''
  host:
    description: Host interface to bind the local MCP server
    required: false
    default: '127.0.0.1'
  port:
    description: Port to bind the local MCP server
    required: false
    default: '8011'
  mount_path:
    description: Mount path for the local streamable HTTP MCP endpoint
    required: false
    default: '/mcp'

outputs:
  effective_url:
    description: Local MCP URL when startup succeeds, otherwise the provided fallback URL
    value: ${{ steps.start.outputs.effective_url }}
  local_url:
    description: URL of the local MCP server
    value: ${{ steps.start.outputs.local_url }}
  started:
    description: Whether the local MCP server started and passed validation
    value: ${{ steps.start.outputs.started }}
  pid:
    description: PID of the local MCP server process when startup succeeded
    value: ${{ steps.start.outputs.pid }}

runs:
  using: composite
  steps:
    - name: Start and validate local MCP server
      id: start
      shell: bash
      run: |
        set -euo pipefail

        host="${{ inputs.host }}"
        port="${{ inputs.port }}"
        mount_path="${{ inputs.mount_path }}"
        fallback_url="${{ inputs.fallback_url }}"

        # Normalize the mount path so it always starts with a slash.
        if [[ ! "$mount_path" =~ ^/ ]]; then
          mount_path="/$mount_path"
        fi

        local_url="http://${host}:${port}${mount_path}"
        health_url="http://${host}:${port}/healthz"
        log_file="$RUNNER_TEMP/local-mcp-server.log"
        pid_file="$RUNNER_TEMP/local-mcp-server.pid"
        rm -f "$log_file" "$pid_file"

        # Launch the server detached in its own session via a small Python shim
        # so we can capture its PID and redirect its output to the log file.
        server_pid="$(
          python3 - "$GITHUB_WORKSPACE/python" "$log_file" "$host" "$port" "$mount_path" <<'PY'
        from __future__ import annotations

        import subprocess
        import sys

        workspace, log_file, host, port, mount_path = sys.argv[1:]

        with open(log_file, "w", encoding="utf-8") as log:
            process = subprocess.Popen(
                [
                    "uv",
                    "run",
                    "python",
                    "scripts/local_mcp_streamable_http_server.py",
                    "--host",
                    host,
                    "--port",
                    port,
                    "--mount-path",
                    mount_path,
                ],
                cwd=workspace,
                stdout=log,
                stderr=subprocess.STDOUT,
                start_new_session=True,
            )

        print(process.pid)
        PY
        )"
        echo "$server_pid" > "$pid_file"

        # Wait up to 30 s for the health endpoint; stop early if the server died.
        started=false
        for _ in $(seq 1 30); do
          if curl --silent --fail "$health_url" >/dev/null; then
            started=true
            break
          fi
          if ! kill -0 "$server_pid" 2>/dev/null; then
            break
          fi
          sleep 1
        done

        # End-to-end validation: connect with the MCP client and invoke a tool.
        # Run in a subshell so a failure only flips `started`, not the step.
        if [[ "$started" == "true" ]]; then
          if ! (
            cd "$GITHUB_WORKSPACE/python"
            LOCAL_MCP_URL="$local_url" uv run python - <<'PY'
        from __future__ import annotations

        import asyncio
        import os

        from agent_framework import Content, MCPStreamableHTTPTool


        def result_to_text(result: str | list[Content]) -> str:
            if isinstance(result, str):
                return result
            return "\n".join(content.text for content in result if content.type == "text" and content.text)


        async def main() -> None:
            tool = MCPStreamableHTTPTool(
                name="local_ci_mcp",
                url=os.environ["LOCAL_MCP_URL"],
                approval_mode="never_require",
            )

            async with tool:
                assert tool.functions, "Local MCP server did not expose any tools."
                result = result_to_text(await tool.functions[0].invoke(query="What is Agent Framework?"))
                assert result, "Local MCP server returned an empty response."


        asyncio.run(main())
        PY
          ); then
            started=false
          fi
        fi

        effective_url="$local_url"
        pid="$server_pid"

        if [[ "$started" != "true" ]]; then
          effective_url="$fallback_url"
          pid=""
          # Terminate the whole process group (negative PID) when possible,
          # falling back to the PID itself; escalate TERM -> KILL after 1 s.
          if kill -0 "$server_pid" 2>/dev/null; then
            kill -TERM -- "-$server_pid" 2>/dev/null || kill -TERM "$server_pid" || true
            sleep 1
            kill -KILL -- "-$server_pid" 2>/dev/null || kill -KILL "$server_pid" || true
          fi
          echo "Local MCP server was unavailable; continuing with fallback LOCAL_MCP_URL."
          if [[ -f "$log_file" ]]; then
            tail -n 100 "$log_file" || true
          fi
        else
          echo "Using local MCP server at $local_url"
        fi

        echo "started=$started" >> "$GITHUB_OUTPUT"
        echo "local_url=$local_url" >> "$GITHUB_OUTPUT"
        echo "effective_url=$effective_url" >> "$GITHUB_OUTPUT"
        echo "pid=$pid" >> "$GITHUB_OUTPUT"
3 changes: 1 addition & 2 deletions .github/workflows/python-check-coverage.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,8 +41,7 @@
"packages.purview.agent_framework_purview",
"packages.anthropic.agent_framework_anthropic",
"packages.azure-ai-search.agent_framework_azure_ai_search",
"packages.core.agent_framework.azure",
"packages.core.agent_framework.openai",
"packages.openai.agent_framework_openai",
# Individual files (if you want to enforce specific files instead of whole packages)
"packages/core/agent_framework/observability.py",
# Add more targets here as coverage improves
Expand Down
56 changes: 48 additions & 8 deletions .github/workflows/python-integration-tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,8 @@ jobs:
OPENAI_CHAT_MODEL_ID: ${{ vars.OPENAI__CHATMODELID }}
OPENAI_RESPONSES_MODEL_ID: ${{ vars.OPENAI__RESPONSESMODELID }}
OPENAI_EMBEDDINGS_MODEL_ID: ${{ vars.OPENAI_EMBEDDING_MODEL_ID }}
OPENAI_MODEL: ${{ vars.OPENAI__RESPONSESMODELID }}
OPENAI_EMBEDDING_MODEL: ${{ vars.OPENAI_EMBEDDING_MODEL_ID }}
OPENAI_API_KEY: ${{ secrets.OPENAI__APIKEY }}
defaults:
run:
Expand All @@ -81,8 +83,8 @@ jobs:
- name: Test with pytest (OpenAI integration)
run: >
uv run pytest --import-mode=importlib
packages/core/tests/openai
-m integration
packages/openai/tests
-m "integration and not azure"
-n logical --dist worksteal
--timeout=120 --session-timeout=900 --timeout_method thread
--retries 2 --retry-delay 5
Expand All @@ -94,8 +96,9 @@ jobs:
environment: integration
timeout-minutes: 60
env:
AZURE_OPENAI_CHAT_DEPLOYMENT_NAME: ${{ vars.AZUREOPENAI__CHATDEPLOYMENTNAME }}
AZURE_OPENAI_CHAT_DEPLOYMENT_NAME: ${{ vars.AZUREOPENAI__RESPONSESDEPLOYMENTNAME }}
AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME: ${{ vars.AZUREOPENAI__RESPONSESDEPLOYMENTNAME }}
AZURE_OPENAI_DEPLOYMENT_NAME: ${{ vars.AZUREOPENAI__RESPONSESDEPLOYMENTNAME }}
AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME: ${{ vars.AZUREOPENAI__EMBEDDINGDEPLOYMENTNAME }}
AZURE_OPENAI_ENDPOINT: ${{ vars.AZUREOPENAI__ENDPOINT }}
defaults:
Expand All @@ -121,7 +124,9 @@ jobs:
- name: Test with pytest (Azure OpenAI integration)
run: >
uv run pytest --import-mode=importlib
packages/core/tests/azure
packages/openai/tests/openai/test_openai_chat_completion_client_azure.py
packages/openai/tests/openai/test_openai_chat_client_azure.py
packages/azure-ai/tests/azure_openai
-m integration
-n logical --dist worksteal
--timeout=120 --session-timeout=900 --timeout_method thread
Expand Down Expand Up @@ -151,6 +156,13 @@ jobs:
with:
python-version: ${{ env.UV_PYTHON }}
os: ${{ runner.os }}
- name: Start local MCP server
id: local-mcp
uses: ./.github/actions/setup-local-mcp-server
with:
fallback_url: ${{ env.LOCAL_MCP_URL }}
- name: Prefer local MCP URL when available
run: echo "LOCAL_MCP_URL=${{ steps.local-mcp.outputs.effective_url }}" >> "$GITHUB_ENV"
- name: Test with pytest (Anthropic, Ollama, MCP integration)
run: >
uv run pytest --import-mode=importlib
Expand All @@ -161,6 +173,26 @@ jobs:
-n logical --dist worksteal
--timeout=120 --session-timeout=900 --timeout_method thread
--retries 2 --retry-delay 5
- name: Stop local MCP server
if: always()
shell: bash
run: |
set -euo pipefail
server_pid="${{ steps.local-mcp.outputs.pid }}"
if [[ -z "$server_pid" ]]; then
exit 0
fi
if ! kill -0 "$server_pid" 2>/dev/null; then
exit 0
fi
kill -TERM -- "-$server_pid" 2>/dev/null || kill -TERM "$server_pid" 2>/dev/null || true
for _ in $(seq 1 10); do
if ! kill -0 "$server_pid" 2>/dev/null; then
exit 0
fi
sleep 1
done
kill -KILL -- "-$server_pid" 2>/dev/null || kill -KILL "$server_pid" 2>/dev/null || true

# Azure Functions + Durable Task integration tests
python-tests-functions:
Expand All @@ -172,10 +204,13 @@ jobs:
UV_PYTHON: "3.11"
OPENAI_CHAT_MODEL_ID: ${{ vars.OPENAI__CHATMODELID }}
OPENAI_RESPONSES_MODEL_ID: ${{ vars.OPENAI__RESPONSESMODELID }}
OPENAI_MODEL: ${{ vars.OPENAI__RESPONSESMODELID }}
OPENAI_API_KEY: ${{ secrets.OPENAI__APIKEY }}
AZURE_OPENAI_CHAT_DEPLOYMENT_NAME: ${{ vars.AZUREOPENAI__CHATDEPLOYMENTNAME }}
AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME: ${{ vars.AZUREOPENAI__RESPONSESDEPLOYMENTNAME }}
OPENAI_EMBEDDING_MODEL: ${{ vars.OPENAI_EMBEDDING_MODEL_ID }}
AZURE_OPENAI_ENDPOINT: ${{ vars.AZUREOPENAI__ENDPOINT }}
AZURE_OPENAI_DEPLOYMENT_NAME: ${{ vars.AZUREOPENAI__RESPONSESDEPLOYMENTNAME }}
FOUNDRY_MODEL: ${{ vars.AZUREAI__DEPLOYMENTNAME }}
FOUNDRY_PROJECT_ENDPOINT: ${{ secrets.AZUREAI__ENDPOINT }}
FUNCTIONS_WORKER_RUNTIME: "python"
DURABLE_TASK_SCHEDULER_CONNECTION_STRING: "Endpoint=http://localhost:8080;TaskHub=default;Authentication=None"
AzureWebJobsStorage: "UseDevelopmentStorage=true"
Expand Down Expand Up @@ -209,7 +244,8 @@ jobs:
packages/durabletask/tests/integration_tests
-m integration
-n logical --dist worksteal
--timeout=120 --session-timeout=900 --timeout_method thread
-x
--timeout=360 --session-timeout=900 --timeout_method thread
--retries 2 --retry-delay 5

# Azure AI integration tests
Expand All @@ -221,6 +257,8 @@ jobs:
env:
AZURE_AI_PROJECT_ENDPOINT: ${{ secrets.AZUREAI__ENDPOINT }}
AZURE_AI_MODEL_DEPLOYMENT_NAME: ${{ vars.AZUREAI__DEPLOYMENTNAME }}
FOUNDRY_PROJECT_ENDPOINT: ${{ secrets.AZUREAI__ENDPOINT }}
FOUNDRY_MODEL: ${{ vars.AZUREAI__DEPLOYMENTNAME }}
LOCAL_MCP_URL: ${{ vars.LOCAL_MCP__URL }}
defaults:
run:
Expand All @@ -244,7 +282,9 @@ jobs:
subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }}
- name: Test with pytest
timeout-minutes: 15
run: uv run --directory packages/azure-ai poe integration-tests -n logical --dist worksteal --timeout=120 --session-timeout=900 --timeout_method thread --retries 2 --retry-delay 5
run: |
uv run --directory packages/azure-ai poe integration-tests -n logical --dist worksteal --timeout=120 --session-timeout=900 --timeout_method thread --retries 2 --retry-delay 5
uv run --directory packages/foundry poe integration-tests -n logical --dist worksteal --timeout=120 --session-timeout=900 --timeout_method thread --retries 2 --retry-delay 5

# Azure Cosmos integration tests
python-tests-cosmos:
Expand Down
Loading
Loading