diff --git a/.gitignore b/.gitignore
index fd2925178a..022da923b2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -84,6 +84,7 @@ devtools/visual-testing/pnpm-lock.yaml
__pycache__/
*.pyc
*.pyo
+venv/
# Generated platform companion build metadata (do not commit)
packages/sdk/langs/python/platforms/**/*.egg-info/
# Generated python packaging metadata (do not commit)
diff --git a/apps/docs/docs.json b/apps/docs/docs.json
index af1f72e509..0d442c4d47 100644
--- a/apps/docs/docs.json
+++ b/apps/docs/docs.json
@@ -115,6 +115,7 @@
"group": "AI Agents",
"pages": [
"document-engine/ai-agents/llm-tools",
+ "document-engine/ai-agents/integrations",
"document-engine/ai-agents/skills",
"document-engine/ai-agents/mcp-server"
]
@@ -318,6 +319,10 @@
"source": "/document-engine/mcp",
"destination": "/document-engine/ai-agents/mcp-server"
},
+ {
+ "source": "/document-engine/ai-agents/providers",
+ "destination": "/document-engine/ai-agents/integrations"
+ },
{
"source": "/core/superdoc/properties",
"destination": "/core/superdoc/methods#properties"
diff --git a/apps/docs/document-engine/ai-agents/integrations.mdx b/apps/docs/document-engine/ai-agents/integrations.mdx
new file mode 100644
index 0000000000..dfc81f5ed8
--- /dev/null
+++ b/apps/docs/document-engine/ai-agents/integrations.mdx
@@ -0,0 +1,499 @@
+---
+title: Integrations
+sidebarTitle: Integrations
+tag: NEW
+description: Connect SuperDoc tools to AWS Bedrock, Google Vertex AI, Vercel AI SDK, LangChain, and more
+keywords: "ai integrations, ai providers, agent frameworks, aws bedrock, google vertex, vercel ai, langchain, openai, anthropic, tool use, function calling, superdoc sdk"
+---
+
+SuperDoc tools work with any LLM provider or agent framework that supports tool use. The SDK ships tool definitions in multiple formats — pick the one that matches your stack, write a conversation loop (or let the framework handle it), and dispatch tool calls through the SDK.
+
+Each example below opens a document, gives the model SuperDoc tools, and lets it review and edit the content.
+
+
+LLM tools are in alpha. Tool names and schemas may change between releases.
+
+
+## Cloud platforms
+
+Use SuperDoc tools with cloud AI platforms. You write the agentic loop and control the conversation directly.
+
+
+
+
+
+ ```bash
+ npm install @superdoc-dev/sdk @aws-sdk/client-bedrock-runtime
+ ```
+
+ ```typescript
+ import { BedrockRuntimeClient, ConverseCommand } from '@aws-sdk/client-bedrock-runtime';
+ import {
+ createSuperDocClient, chooseTools, dispatchSuperDocTool,
+ formatToolResult, mergeDiscoveredTools,
+ } from '@superdoc-dev/sdk';
+
+ const client = createSuperDocClient();
+ await client.connect();
+ await client.doc.open({ doc: './contract.docx' });
+
+ // Anthropic format → Bedrock toolSpec shape
+ const { tools } = await chooseTools({ provider: 'anthropic' });
+ const toolConfig = { tools: [] };
+ mergeDiscoveredTools(toolConfig, { tools }, { provider: 'anthropic', target: 'bedrock' });
+
+ const bedrock = new BedrockRuntimeClient({ region: 'us-east-1' });
+ const messages = [
+ { role: 'user', content: [{ text: 'Review this contract.' }] },
+ ];
+
+ while (true) {
+ const res = await bedrock.send(new ConverseCommand({
+ modelId: 'us.anthropic.claude-sonnet-4-6',
+ messages,
+ system: [{ text: 'You edit .docx files using SuperDoc tools.' }],
+ toolConfig,
+ }));
+
+ const output = res.output?.message;
+ if (!output) break;
+ messages.push(output);
+
+ const toolUses = output.content?.filter((b) => b.toolUse) ?? [];
+ if (!toolUses.length) break;
+
+ const results = [];
+ for (const block of toolUses) {
+ const { name, input, toolUseId } = block.toolUse;
+ const result = await dispatchSuperDocTool(client, name, input ?? {});
+ results.push(formatToolResult(result, { target: 'bedrock', toolUseId }));
+ }
+ messages.push({ role: 'user', content: results });
+ }
+
+ await client.doc.save({ inPlace: true });
+ await client.dispose();
+ ```
+
+
+ ```bash
+ pip install superdoc-sdk boto3
+ ```
+
+ ```python
+ import boto3
+ from superdoc import (
+ SuperDocClient, choose_tools, dispatch_superdoc_tool,
+ format_tool_result, merge_discovered_tools,
+ )
+
+ client = SuperDocClient()
+ client.connect()
+ client.doc.open(doc="./contract.docx")
+
+ # Anthropic format → Bedrock toolSpec shape
+ sd_tools = choose_tools(provider="anthropic")
+ tool_config = {"tools": []}
+ merge_discovered_tools(tool_config, sd_tools, provider="anthropic", target="bedrock")
+
+ bedrock = boto3.client("bedrock-runtime", region_name="us-east-1")
+ messages = [{"role": "user", "content": [{"text": "Review this contract."}]}]
+
+ while True:
+ response = bedrock.converse(
+ modelId="us.anthropic.claude-sonnet-4-6",
+ messages=messages,
+ system=[{"text": "You edit .docx files using SuperDoc tools."}],
+ toolConfig=tool_config,
+ )
+
+ output = response["output"]["message"]
+ messages.append(output)
+
+ tool_uses = [b for b in output.get("content", []) if "toolUse" in b]
+ if not tool_uses:
+ break
+
+ tool_results = []
+ for block in tool_uses:
+ tu = block["toolUse"]
+ result = dispatch_superdoc_tool(client, tu["name"], tu.get("input", {}))
+ tool_results.append(
+ format_tool_result(result, target="bedrock", tool_use_id=tu["toolUseId"])
+ )
+ messages.append({"role": "user", "content": tool_results})
+
+ client.doc.save(in_place=True)
+ client.dispose()
+ ```
+
+
+
+ **Auth**: AWS credentials via `aws configure`, env vars, or IAM role. No API key needed.
+
+
+
+
+
+ ```bash
+ npm install @superdoc-dev/sdk @google-cloud/vertexai
+ ```
+
+ ```typescript
+ import { VertexAI } from '@google-cloud/vertexai';
+ import {
+ createSuperDocClient, chooseTools, dispatchSuperDocTool,
+ sanitizeToolSchemas,
+ } from '@superdoc-dev/sdk';
+
+ const client = createSuperDocClient();
+ await client.connect();
+ await client.doc.open({ doc: './contract.docx' });
+
+ // Generic format → Vertex function declarations (sanitized for Vertex compatibility)
+ const { tools } = await chooseTools({ provider: 'generic' });
+ const sanitized = sanitizeToolSchemas(tools, 'vertex');
+ const vertexTools = [{
+ functionDeclarations: sanitized.map((t) => ({
+ name: t.name,
+ description: t.description,
+ parameters: t.parameters,
+ })),
+ }];
+
+ const vertexAI = new VertexAI({ project: 'your-project', location: 'us-central1' });
+ const model = vertexAI.getGenerativeModel({
+ model: 'gemini-2.5-pro',
+ tools: vertexTools,
+ systemInstruction: { role: 'system', parts: [{ text: 'You edit .docx files using SuperDoc tools.' }] },
+ });
+
+ const chat = model.startChat();
+ let response = await chat.sendMessage([{ text: 'Review this contract.' }]);
+
+ while (true) {
+ const parts = response.response.candidates?.[0]?.content.parts ?? [];
+ const calls = parts.filter((p) => p.functionCall);
+ if (!calls.length) break;
+
+ const results = [];
+ for (const part of calls) {
+ const { name, args } = part.functionCall;
+ const result = await dispatchSuperDocTool(client, name, args ?? {});
+ results.push({ functionResponse: { name, response: result } });
+ }
+ response = await chat.sendMessage(results);
+ }
+
+ await client.doc.save({ inPlace: true });
+ await client.dispose();
+ ```
+
+
+ ```bash
+ pip install superdoc-sdk google-cloud-aiplatform
+ ```
+
+ ```python
+ import vertexai
+ from vertexai.generative_models import GenerativeModel, Tool, FunctionDeclaration, Part
+ from superdoc import SuperDocClient, choose_tools, dispatch_superdoc_tool, sanitize_tool_schemas
+
+ client = SuperDocClient()
+ client.connect()
+ client.doc.open(doc="./contract.docx")
+
+ # Generic format → Vertex function declarations (sanitized for Vertex compatibility)
+ result = choose_tools(provider="generic")
+ sanitized = sanitize_tool_schemas(result["tools"], "vertex")
+ vertex_tools = [Tool(function_declarations=[
+ FunctionDeclaration(name=t["name"], description=t["description"], parameters=t["parameters"])
+ for t in sanitized
+ ])]
+
+ vertexai.init(project="your-project", location="us-central1")
+ model = GenerativeModel(
+ "gemini-2.5-pro",
+ tools=vertex_tools,
+ system_instruction="You edit .docx files using SuperDoc tools.",
+ )
+ chat = model.start_chat()
+ response = chat.send_message("Review this contract.")
+
+ while True:
+ calls = [p for p in response.candidates[0].content.parts if p.function_call.name]
+ if not calls:
+ break
+
+ responses = []
+ for part in calls:
+ name = part.function_call.name
+ args = dict(part.function_call.args) if part.function_call.args else {}
+ result = dispatch_superdoc_tool(client, name, args)
+ responses.append(Part.from_function_response(name=name, response=result))
+ response = chat.send_message(responses)
+
+ client.doc.save(in_place=True)
+ client.dispose()
+ ```
+
+
+
+ **Auth**: `gcloud auth application-default login` or a service account key.
+
+
+
+## Agent frameworks
+
+Use SuperDoc tools with agent frameworks. The framework manages the agentic loop — you configure tools and let it run.
+
+
+
+ ```bash
+ npm install @superdoc-dev/sdk ai @ai-sdk/openai zod
+ ```
+
+ ```typescript
+ import { generateText, tool } from 'ai';
+ import { openai } from '@ai-sdk/openai';
+ import { createSuperDocClient, chooseTools, dispatchSuperDocTool } from '@superdoc-dev/sdk';
+ import { z } from 'zod';
+
+ const client = createSuperDocClient();
+ await client.connect();
+ await client.doc.open({ doc: './contract.docx' });
+
+ // All tools — no discover_tools since the framework manages a fixed tool set
+ const { tools: sdTools } = await chooseTools({ provider: 'vercel', mode: 'all' });
+
+ // Wrap as Vercel AI tool() objects
+ const tools = {};
+ for (const t of sdTools) {
+ tools[t.function.name] = tool({
+ description: t.function.description,
+ parameters: z.object({}).passthrough(),
+ execute: async (args) =>
+ dispatchSuperDocTool(client, t.function.name, args),
+ });
+ }
+
+ // generateText handles the agentic loop automatically
+ const result = await generateText({
+ model: openai('gpt-4o'),
+ system: 'You edit .docx files using SuperDoc tools.',
+ prompt: 'Review this contract.',
+ tools,
+ maxSteps: 20,
+ });
+
+ console.log(result.text);
+ await client.doc.save({ inPlace: true });
+ await client.dispose();
+ ```
+
+ **Auth**: `OPENAI_API_KEY` env var. Swap `openai(...)` for `anthropic(...)`, `google(...)`, etc.
+
+
+
+
+
+ ```bash
+ npm install @superdoc-dev/sdk @langchain/openai @langchain/core @langchain/langgraph zod
+ ```
+
+ ```typescript
+ import { ChatOpenAI } from '@langchain/openai';
+ import { DynamicStructuredTool } from '@langchain/core/tools';
+ import { createReactAgent } from '@langchain/langgraph/prebuilt';
+ import { HumanMessage } from '@langchain/core/messages';
+ import { z } from 'zod';
+ import { createSuperDocClient, chooseTools, dispatchSuperDocTool } from '@superdoc-dev/sdk';
+
+ const client = createSuperDocClient();
+ await client.connect();
+ await client.doc.open({ doc: './contract.docx' });
+
+ // All tools — no discover_tools since the framework manages a fixed tool set
+ const { tools: sdTools } = await chooseTools({ provider: 'generic', mode: 'all' });
+
+ // Wrap as LangChain DynamicStructuredTool objects
+ const tools = sdTools.map(
+ (t) => new DynamicStructuredTool({
+ name: t.name,
+ description: t.description,
+ schema: z.object({}).passthrough(),
+ func: async (args) => {
+ const result = await dispatchSuperDocTool(client, t.name, args);
+ return JSON.stringify(result);
+ },
+ }),
+ );
+
+ const agent = createReactAgent({
+ llm: new ChatOpenAI({ model: 'gpt-4o' }),
+ tools,
+ prompt: 'You edit .docx files using SuperDoc tools.',
+ });
+
+ const result = await agent.invoke({
+ messages: [new HumanMessage('Review this contract.')],
+ });
+
+ console.log(result.messages.at(-1).content);
+ await client.doc.save({ inPlace: true });
+ await client.dispose();
+ ```
+
+
+ ```bash
+ pip install superdoc-sdk langchain-openai langgraph
+ ```
+
+ ```python
+ import json
+ from langchain_openai import ChatOpenAI
+ from langchain_core.tools import StructuredTool
+ from langgraph.prebuilt import create_react_agent
+ from langchain_core.messages import HumanMessage
+ from superdoc import SuperDocClient, choose_tools, dispatch_superdoc_tool
+
+ client = SuperDocClient()
+ client.connect()
+ client.doc.open(doc="./contract.docx")
+
+ # All tools — no discover_tools since the framework manages a fixed tool set
+ result = choose_tools(provider="generic", mode="all")
+
+ # Wrap as LangChain StructuredTool objects
+ def make_tool(t):
+ def invoke(**kwargs) -> str:
+ return json.dumps(dispatch_superdoc_tool(client, t["name"], kwargs))
+ return StructuredTool.from_function(
+ func=invoke, name=t["name"], description=t["description"],
+ infer_schema=False,
+ )
+
+ tools = [make_tool(t) for t in result["tools"]]
+
+ agent = create_react_agent(
+ model=ChatOpenAI(model="gpt-4o"),
+ tools=tools,
+ prompt="You edit .docx files using SuperDoc tools.",
+ )
+
+ result = agent.invoke(
+ {"messages": [HumanMessage(content="Review this contract.")]}
+ )
+ print(result["messages"][-1].content)
+
+ client.doc.save(in_place=True)
+ client.dispose()
+ ```
+
+
+
+ **Auth**: `OPENAI_API_KEY` env var. Swap `ChatOpenAI` for `ChatAnthropic`, `ChatGoogleGenerativeAI`, etc.
+
+
+
+## Tool format reference
+
+The SDK ships pre-formatted tools for each integration. The conversion is minimal:
+
+| Integration | Type | SDK format | SDK helpers | Native shape |
+|-------------|------|-----------|-------------|--------------|
+| AWS Bedrock | Cloud platform | `anthropic` | `mergeDiscoveredTools`, `formatToolResult`, `formatToolError` | `{ toolSpec: { name, description, inputSchema: { json } } }` |
+| Google Vertex AI | Cloud platform | `generic` | `sanitizeToolSchemas`, `mergeDiscoveredTools` | `{ functionDeclarations: [...] }` |
+| Vercel AI SDK | Framework | `vercel` | — | Wrap in Vercel `tool()` with `z.object({}).passthrough()` |
+| LangChain | Framework | `generic` | — | Wrap in `DynamicStructuredTool` |
+| OpenAI | Direct API | `openai` | `formatToolResult` | Pass directly |
+| Anthropic | Direct API | `anthropic` | `formatToolResult` | Pass directly |
+
+## The discover_tools pattern
+
+All provider examples above use **essential mode** (default) — 5 core tools plus `discover_tools`. When the model needs more tools (comments, formatting, tables, etc.), it calls `discover_tools` to load them dynamically.
+
+Handle this in your agentic loop. Since `discover_tools` is a meta-tool (not a document operation), intercept it before dispatching:
+
+```typescript
+import { chooseTools, mergeDiscoveredTools } from '@superdoc-dev/sdk';
+
+if (name === 'discover_tools') {
+ // discover_tools is a meta-tool — handle client-side via chooseTools
+ const discovered = await chooseTools({ provider: 'anthropic', groups: args.groups });
+ mergeDiscoveredTools(toolConfig, discovered, { provider: 'anthropic', target: 'bedrock' });
+ result = discovered;
+} else {
+ result = await dispatchSuperDocTool(client, name, args);
+}
+```
+
+
+Framework-managed examples (Vercel AI, LangChain) use `mode: 'all'` instead, since they can't inject new tools mid-conversation.
+
+
+See the [LLM Tools guide](/document-engine/ai-agents/llm-tools#the-discover_tools-pattern) for details.
+
+## Tracked changes
+
+For contract review workflows, you typically want all edits to appear as tracked changes so a human can accept or reject them. Two approaches:
+
+### Instruct the model
+
+Tell the model to use `changeMode: "tracked"` in its `apply_mutations` calls:
+
+```markdown
+## System prompt
+All edits must use changeMode: "tracked" so they appear as
+tracked changes for human review.
+```
+
+### Use the headless editor
+
+If you don't need agentic tool use — just want the model to suggest edits — use the headless editor with `documentMode: 'suggesting'`:
+
+```typescript
+import { Editor } from 'superdoc/super-editor';
+import { readFile, writeFile } from 'node:fs/promises';
+
+const docx = await readFile('./contract.docx');
+const editor = await Editor.open(docx, { documentMode: 'suggesting' });
+
+// Get suggestions from your LLM (structured output, no tool use)
+const suggestions = await getSuggestions(editor.state.doc.textContent);
+
+// Apply each suggestion as a tracked change
+for (const s of suggestions) {
+ const matches = editor.commands.search(s.find, { highlight: false });
+ if (!matches.length) continue;
+
+ editor.commands.insertTrackedChange({
+ from: matches[0].from,
+ to: matches[0].to,
+ text: s.replace,
+ user: { name: 'AI Reviewer', email: 'ai@example.com' },
+ });
+}
+
+const result = await editor.exportDocx();
+await writeFile('./reviewed.docx', Buffer.from(result));
+editor.destroy();
+```
+
+## Best practices
+
+- **Start with essential mode.** Load 5 tools + `discover_tools`. The model loads more groups when needed. This keeps token usage low.
+- **Use `apply_mutations` for text edits.** It batches multiple rewrites in one call, reducing round trips.
+- **Feed errors back.** When a tool call fails, return the error as a tool result. Most models self-correct on the next turn.
+- **Pin your model version.** Use a specific model ID rather than an alias to avoid behavior changes between releases.
+
+## Example repository
+
+Complete, runnable examples for all cloud platforms and frameworks (Node.js and Python) are available at [`examples/ai/`](https://github.com/superdoc-dev/superdoc/tree/main/examples/ai).
+
+## Related
+
+- [LLM Tools](/document-engine/ai-agents/llm-tools) — tool selection, dispatch, and the full API
+- [Skills](/document-engine/ai-agents/skills) — reusable prompt templates
+- [MCP Server](/document-engine/ai-agents/mcp-server) — Model Context Protocol integration
+- [SDKs](/document-engine/sdks) — typed Node.js and Python wrappers
diff --git a/apps/docs/document-engine/ai-agents/llm-tools.mdx b/apps/docs/document-engine/ai-agents/llm-tools.mdx
index 12ed982943..0edcd6a691 100644
--- a/apps/docs/document-engine/ai-agents/llm-tools.mdx
+++ b/apps/docs/document-engine/ai-agents/llm-tools.mdx
@@ -226,17 +226,21 @@ Tools are organized into 11 groups. In essential mode, the LLM can load any grou
## The discover_tools pattern
-When the LLM needs tools beyond the essential set, it calls `discover_tools` with the groups it wants. Your agentic loop handles this like any other tool call — `dispatchSuperDocTool` returns the new tool definitions, and you merge them into the next request.
+When the LLM needs tools beyond the essential set, it calls `discover_tools` with the groups it wants. Since `discover_tools` is a meta-tool (not a document operation), intercept it before `dispatchSuperDocTool` and handle it client-side via `chooseTools`:
```typescript
+import { chooseTools } from '@superdoc-dev/sdk';
+
for (const call of message.tool_calls) {
- const result = await dispatchSuperDocTool(
- client, call.function.name, JSON.parse(call.function.arguments),
- );
+ let result;
+ const args = JSON.parse(call.function.arguments);
- // discover_tools returns new tool definitions — merge them
if (call.function.name === 'discover_tools') {
+ // Meta-tool — resolve client-side, then merge new tools
+ result = await chooseTools({ provider: 'openai', groups: args.groups });
tools.push(...result.tools);
+ } else {
+ result = await dispatchSuperDocTool(client, call.function.name, args);
}
messages.push({
diff --git a/apps/docs/scripts/validate-code-imports.ts b/apps/docs/scripts/validate-code-imports.ts
index 0601c995ca..81fceaee00 100644
--- a/apps/docs/scripts/validate-code-imports.ts
+++ b/apps/docs/scripts/validate-code-imports.ts
@@ -52,6 +52,8 @@ const EXACT_EXTERNAL_IMPORTS = new Set([
'cors',
'pg',
'ioredis',
+ 'ai',
+ 'zod',
]);
const PREFIX_EXTERNAL_IMPORTS = [
@@ -64,6 +66,9 @@ const PREFIX_EXTERNAL_IMPORTS = [
'@liveblocks/',
'@fastify/',
'@aws-sdk/',
+ '@ai-sdk/',
+ '@google-cloud/',
+ '@langchain/',
'next/',
];
diff --git a/examples/README.md b/examples/README.md
index 1898c019fb..90736c6dcd 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -22,6 +22,30 @@ Minimal, self-contained examples showing how to use SuperDoc.
| [collaboration](./collaboration) | Real-time editing with various Yjs providers | [Guides](https://docs.superdoc.dev/guides) |
| [headless](./headless) | Server-side AI redlining with Node.js | [AI Agents](https://docs.superdoc.dev/getting-started/ai-agents) |
+## AI Integrations
+
+Connect SuperDoc's Document Engine to cloud AI platforms and agent frameworks.
+
+### Cloud Platforms
+
+| Integration | Description | Docs |
+|-------------|-------------|------|
+| [AWS Bedrock](./ai/bedrock) | Bedrock Converse API with tool use | [Integrations](https://docs.superdoc.dev/document-engine/ai-agents/integrations) |
+| [Google Vertex AI](./ai/vertex) | Gemini with function calling | [Integrations](https://docs.superdoc.dev/document-engine/ai-agents/integrations) |
+
+### Agent Frameworks
+
+| Integration | Description | Docs |
+|-------------|-------------|------|
+| [Vercel AI SDK](./ai/vercel-ai) | Any model via the Vercel AI SDK | [Integrations](https://docs.superdoc.dev/document-engine/ai-agents/integrations) |
+| [LangChain](./ai/langchain) | LangGraph ReAct agent | [Integrations](https://docs.superdoc.dev/document-engine/ai-agents/integrations) |
+
+### Demos
+
+| Example | Description | Docs |
+|---------|-------------|------|
+| [Contract Review](./ai/contract-review) | Full demo: agentic + headless contract review | [AI Agents](https://docs.superdoc.dev/getting-started/ai-agents) |
+
## Running an example
```bash
diff --git a/examples/ai/README.md b/examples/ai/README.md
new file mode 100644
index 0000000000..c1091a93f5
--- /dev/null
+++ b/examples/ai/README.md
@@ -0,0 +1,45 @@
+# AI Integration Examples
+
+Give LLMs structured access to document operations. Each example connects SuperDoc's Document Engine to a cloud AI platform or agent framework — open a doc, let the model review and edit it with tools, save the result.
+
+**Docs:** [Integrations](https://docs.superdoc.dev/document-engine/ai-agents/integrations) · [LLM Tools](https://docs.superdoc.dev/document-engine/ai-agents/llm-tools)
+
+## Cloud platforms
+
+You write the agentic loop and control the conversation directly.
+
+| Platform | Node.js | Python | Auth |
+|----------|---------|--------|------|
+| [AWS Bedrock](./bedrock) | `index.ts` | `index.py` | AWS credentials (`aws configure`) |
+| [Google Vertex AI](./vertex) | `index.ts` | `index.py` | Google Cloud credentials (`gcloud auth application-default login`) |
+
+## Agent frameworks
+
+The framework manages the agentic loop — you configure tools and let it run.
+
+| Framework | Node.js | Python | Auth |
+|-----------|---------|--------|------|
+| [Vercel AI SDK](./vercel-ai) | `index.ts` | — | `OPENAI_API_KEY` |
+| [LangChain](./langchain) | `index.ts` | `index.py` | `OPENAI_API_KEY` |
+
+## Full demo
+
+| Example | Description | Docs |
+|---------|-------------|------|
+| [contract-review](./contract-review) | AI-powered contract review with agentic and headless patterns | [AI Agents](https://docs.superdoc.dev/getting-started/ai-agents) |
+
+## Run
+
+```bash
+# Node.js
+cd bedrock
+npm install
+npx tsx index.ts contract.docx reviewed.docx
+
+# Python
+cd bedrock
+pip install superdoc-sdk boto3
+python index.py contract.docx reviewed.docx
+```
+
+Each integration needs different dependencies — see the README in each directory.
diff --git a/examples/ai/bedrock/README.md b/examples/ai/bedrock/README.md
new file mode 100644
index 0000000000..b1f0601843
--- /dev/null
+++ b/examples/ai/bedrock/README.md
@@ -0,0 +1,42 @@
+# SuperDoc + AWS Bedrock
+
+Agentic document editing using the Bedrock Converse API.
+
+**Docs:** [Integrations](https://docs.superdoc.dev/document-engine/ai-agents/integrations)
+
+## Prerequisites
+
+- AWS credentials configured (`aws configure` or environment variables)
+- Bedrock model access enabled in the [AWS console](https://console.aws.amazon.com/bedrock/)
+
+## Run
+
+### Node.js
+
+```bash
+npm install
+npx tsx index.ts contract.docx reviewed.docx
+```
+
+### Python
+
+```bash
+python -m venv venv && source venv/bin/activate
+pip install superdoc-sdk boto3
+python index.py contract.docx reviewed.docx
+```
+
+## Configuration
+
+| Variable | Default | Description |
+|----------|---------|-------------|
+| `AWS_REGION` | `us-east-1` | AWS region with Bedrock access |
+| `BEDROCK_MODEL_ID` | `us.anthropic.claude-sonnet-4-6` | Any Bedrock model that supports tool use |
+
+## How it works
+
+1. Connects to SuperDoc via the SDK
+2. Loads tool definitions in Anthropic format — the closest match to Bedrock's Converse API shapes
+3. Converts to Bedrock's `toolSpec` shape (3-line mapping)
+4. Runs an agentic loop: the model calls SuperDoc tools to read, query, and edit the document
+5. Saves the reviewed document
diff --git a/examples/ai/bedrock/contract.docx b/examples/ai/bedrock/contract.docx
new file mode 100644
index 0000000000..68f2475b14
Binary files /dev/null and b/examples/ai/bedrock/contract.docx differ
diff --git a/examples/ai/bedrock/index.py b/examples/ai/bedrock/index.py
new file mode 100644
index 0000000000..707dd72d8b
--- /dev/null
+++ b/examples/ai/bedrock/index.py
@@ -0,0 +1,106 @@
+"""
+SuperDoc + AWS Bedrock
+
+Minimal agentic loop: Claude on Bedrock uses SuperDoc tools
+to review and edit a Word document.
+
+Usage: python index.py [input.docx] [output.docx]
+
+Requires:
+ - pip install superdoc-sdk boto3
+ - AWS credentials configured (aws configure, env vars, or IAM role)
+"""
+
+import sys
+import os
+import shutil
+from pathlib import Path
+import boto3
+from superdoc import (
+ SuperDocClient,
+ choose_tools,
+ dispatch_superdoc_tool,
+ format_tool_result,
+ format_tool_error,
+ merge_discovered_tools,
+)
+
+MODEL_ID = os.environ.get("BEDROCK_MODEL_ID", "us.anthropic.claude-sonnet-4-6")
+REGION = os.environ.get("AWS_REGION", "us-east-1")
+
+
+def main():
+ args = sys.argv[1:]
+ input_path = str(Path(args[0] if args else "contract.docx").resolve())
+ output_path = str(Path(args[1] if len(args) > 1 else "reviewed.docx").resolve())
+
+ # 1. Connect to SuperDoc — copy to output path so the original is preserved
+ shutil.copy2(input_path, output_path)
+ client = SuperDocClient()
+ client.connect()
+    client.doc.open(doc=output_path)
+
+ # 2. Get tools in Anthropic format and convert to Bedrock toolSpec shape
+    sd_tools = choose_tools(provider="anthropic")
+ tool_config = {"tools": []}
+ merge_discovered_tools(tool_config, sd_tools, provider="anthropic", target="bedrock")
+
+ # 3. Agentic loop
+ bedrock = boto3.client("bedrock-runtime", region_name=REGION)
+ messages = [
+ {"role": "user", "content": [{"text": "Review this contract. Fix vague language and one-sided terms."}]}
+ ]
+
+ for _ in range(20):
+ response = bedrock.converse(
+ modelId=MODEL_ID,
+ messages=messages,
+ system=[{"text": "You edit .docx files using SuperDoc tools. Use tracked changes for all edits."}],
+ toolConfig=tool_config,
+ )
+
+ output = response["output"]["message"]
+ messages.append(output)
+
+ tool_uses = [b for b in output.get("content", []) if "toolUse" in b]
+ if not tool_uses:
+ # Print final response
+ for b in output.get("content", []):
+ if "text" in b:
+ print(b["text"])
+ break
+
+ tool_results = []
+ for block in tool_uses:
+ tool_use = block["toolUse"]
+ name = tool_use["name"]
+ print(f" Tool: {name}")
+
+ try:
+ if name == "discover_tools":
+ # discover_tools is a meta-tool — handle client-side via choose_tools
+ groups = tool_use.get("input", {}).get("groups")
+                    discovered = choose_tools(provider="anthropic", groups=groups)
+ merge_discovered_tools(tool_config, discovered, provider="anthropic", target="bedrock")
+ result = discovered
+ else:
+ result = dispatch_superdoc_tool(client, name, tool_use.get("input", {}))
+
+ tool_results.append(
+ format_tool_result(result, target="bedrock", tool_use_id=tool_use["toolUseId"])
+ )
+ except Exception as e:
+ tool_results.append(
+ format_tool_error(e, target="bedrock", tool_use_id=tool_use["toolUseId"])
+ )
+
+ messages.append({"role": "user", "content": tool_results})
+
+ # 4. Save (in-place to the copy)
+    client.doc.save(in_place=True)
+ client.dispose()
+ print(f"\nSaved to {output_path}")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/ai/bedrock/index.ts b/examples/ai/bedrock/index.ts
new file mode 100644
index 0000000000..ec1b3059c4
--- /dev/null
+++ b/examples/ai/bedrock/index.ts
@@ -0,0 +1,108 @@
+/**
+ * SuperDoc + AWS Bedrock
+ *
+ * Minimal agentic loop: Claude on Bedrock uses SuperDoc tools
+ * to review and edit a Word document.
+ *
+ * Usage: npx tsx index.ts [input.docx] [output.docx]
+ *
+ * Requires: AWS credentials configured, Bedrock model access enabled.
+ */
+
+import path from 'node:path';
+import { copyFileSync } from 'node:fs';
+import {
+ BedrockRuntimeClient,
+ ConverseCommand,
+ type ContentBlock,
+ type Message,
+ type Tool,
+} from '@aws-sdk/client-bedrock-runtime';
+import {
+ createSuperDocClient,
+ chooseTools,
+ dispatchSuperDocTool,
+ formatToolResult,
+ formatToolError,
+ mergeDiscoveredTools,
+ type ToolGroup,
+} from '@superdoc-dev/sdk';
+
+const MODEL_ID = process.env.BEDROCK_MODEL_ID ?? 'us.anthropic.claude-sonnet-4-6';
+const REGION = process.env.AWS_REGION ?? 'us-east-1';
+
+async function main() {
+ const [rawInput = 'contract.docx', rawOutput = 'reviewed.docx'] = process.argv.slice(2);
+ const inputPath = path.resolve(rawInput);
+ const outputPath = path.resolve(rawOutput);
+
+ // 1. Connect to SuperDoc — copy to output path so the original is preserved
+ copyFileSync(inputPath, outputPath);
+ const client = createSuperDocClient();
+ await client.connect();
+ await client.doc.open({ doc: outputPath });
+
+ // 2. Get tools in Anthropic format and convert to Bedrock toolSpec shape
+ const { tools: sdTools } = await chooseTools({ provider: 'anthropic' });
+ const toolConfig = { tools: [] as Tool[] };
+ mergeDiscoveredTools(toolConfig, { tools: sdTools }, { provider: 'anthropic', target: 'bedrock' });
+
+ // 3. Agentic loop
+ const bedrock = new BedrockRuntimeClient({ region: REGION });
+ const messages: Message[] = [
+ { role: 'user', content: [{ text: 'Review this contract. Fix vague language and one-sided terms.' }] },
+ ];
+
+ for (let turn = 0; turn < 20; turn++) {
+ const response = await bedrock.send(
+ new ConverseCommand({
+ modelId: MODEL_ID,
+ messages,
+ system: [{ text: 'You edit .docx files using SuperDoc tools. Use tracked changes for all edits.' }],
+ toolConfig,
+ }),
+ );
+
+ const output = response.output?.message;
+ if (!output) break;
+ messages.push(output);
+
+ const toolUses = (output.content ?? []).filter((b): b is ContentBlock.ToolUseMember => 'toolUse' in b);
+ if (!toolUses.length) {
+ // Print final response
+ for (const b of output.content ?? []) if ('text' in b) console.log(b.text);
+ break;
+ }
+
+ const results: ContentBlock[] = [];
+ for (const block of toolUses) {
+ const { name, input, toolUseId } = block.toolUse!;
+ console.log(` Tool: ${name}`);
+ try {
+ let result: unknown;
+
+ if (name === 'discover_tools') {
+ // discover_tools is a meta-tool — handle client-side via chooseTools
+          const groups = ((input ?? {}) as Record<string, unknown>).groups as ToolGroup[] | undefined;
+ const discovered = await chooseTools({ provider: 'anthropic', groups });
+ mergeDiscoveredTools(toolConfig, discovered, { provider: 'anthropic', target: 'bedrock' });
+ result = discovered;
+ } else {
+          result = await dispatchSuperDocTool(client, name!, (input ?? {}) as Record<string, unknown>);
+ }
+
+ results.push(formatToolResult(result, { target: 'bedrock', toolUseId }) as ContentBlock);
+ } catch (err) {
+ results.push(formatToolError(err, { target: 'bedrock', toolUseId }) as ContentBlock);
+ }
+ }
+ messages.push({ role: 'user', content: results });
+ }
+
+ // 4. Save (in-place to the copy)
+  await client.doc.save({ inPlace: true });
+ await client.dispose();
+ console.log(`\nSaved to ${outputPath}`);
+}
+
+main().catch((e) => { console.error(e.message); process.exit(1); });
diff --git a/examples/ai/bedrock/package.json b/examples/ai/bedrock/package.json
new file mode 100644
index 0000000000..e93b95d91c
--- /dev/null
+++ b/examples/ai/bedrock/package.json
@@ -0,0 +1,15 @@
+{
+ "name": "superdoc-bedrock",
+ "private": true,
+ "type": "module",
+ "scripts": {
+ "start": "tsx index.ts"
+ },
+ "dependencies": {
+ "@aws-sdk/client-bedrock-runtime": "^3.750.0",
+ "@superdoc-dev/sdk": "latest"
+ },
+ "devDependencies": {
+ "tsx": "^4.21.0"
+ }
+}
diff --git a/examples/ai/langchain/README.md b/examples/ai/langchain/README.md
new file mode 100644
index 0000000000..18a36098f8
--- /dev/null
+++ b/examples/ai/langchain/README.md
@@ -0,0 +1,52 @@
+# SuperDoc + LangChain
+
+Agentic document editing using a LangGraph ReAct agent.
+
+**Docs:** [Integrations](https://docs.superdoc.dev/document-engine/ai-agents/integrations)
+
+## Prerequisites
+
+- `OPENAI_API_KEY` environment variable (or swap the model)
+
+## Run
+
+### Node.js
+
+```bash
+npm install
+OPENAI_API_KEY=sk-... npx tsx index.ts contract.docx reviewed.docx
+```
+
+### Python
+
+```bash
+python -m venv venv && source venv/bin/activate
+pip install superdoc-sdk langchain-openai langgraph
+OPENAI_API_KEY=sk-... python index.py contract.docx reviewed.docx
+```
+
+## Configuration
+
+The example uses OpenAI by default. Swap the model class to use any LangChain-compatible provider:
+
+```python
+# OpenAI (default)
+from langchain_openai import ChatOpenAI
+model = ChatOpenAI(model="gpt-4o")
+
+# Anthropic
+from langchain_anthropic import ChatAnthropic
+model = ChatAnthropic(model="claude-sonnet-4-6-20250725")
+
+# Google
+from langchain_google_genai import ChatGoogleGenerativeAI
+model = ChatGoogleGenerativeAI(model="gemini-2.5-pro")
+```
+
+## How it works
+
+1. Connects to SuperDoc via the SDK
+2. Loads tool definitions in generic format and wraps them as LangChain `StructuredTool` / `DynamicStructuredTool` objects
+3. Creates a ReAct agent with `create_react_agent`
+4. The agent calls SuperDoc tools to read, query, and edit the document
+5. Saves the reviewed document
diff --git a/examples/ai/langchain/contract.docx b/examples/ai/langchain/contract.docx
new file mode 100644
index 0000000000..5b911d3399
Binary files /dev/null and b/examples/ai/langchain/contract.docx differ
diff --git a/examples/ai/langchain/index.py b/examples/ai/langchain/index.py
new file mode 100644
index 0000000000..ecbfbced00
--- /dev/null
+++ b/examples/ai/langchain/index.py
@@ -0,0 +1,80 @@
+"""
+SuperDoc + LangChain
+
+Minimal agentic loop: any LangChain-compatible model uses SuperDoc tools
+to review and edit a Word document.
+
+Usage: OPENAI_API_KEY=sk-... python index.py [input.docx] [output.docx]
+
+Requires:
+ - pip install superdoc-sdk langchain-openai langgraph
+ - OPENAI_API_KEY (or swap ChatOpenAI for ChatAnthropic, ChatGoogleGenerativeAI, etc.)
+"""
+
+import sys
+import json
+import shutil
+from pathlib import Path
+from langchain_openai import ChatOpenAI
+from langchain_core.tools import StructuredTool
+from langgraph.prebuilt import create_react_agent
+from langchain_core.messages import HumanMessage
+from superdoc import SuperDocClient, choose_tools, dispatch_superdoc_tool
+
+
+def make_superdoc_tool(client, tool_def):
+ """Wrap a SuperDoc tool definition as a LangChain StructuredTool."""
+
+ def invoke(**kwargs) -> str:
+ print(f" Tool: {tool_def['name']}")
+ result = dispatch_superdoc_tool(client, tool_def["name"], kwargs)
+ return json.dumps(result)
+
+ return StructuredTool.from_function(
+ func=invoke,
+ name=tool_def["name"],
+ description=tool_def["description"],
+ infer_schema=False,
+ )
+
+
+def main():
+ args = sys.argv[1:]
+ input_path = str(Path(args[0] if args else "contract.docx").resolve())
+ output_path = str(Path(args[1] if len(args) > 1 else "reviewed.docx").resolve())
+
+ # 1. Connect to SuperDoc — copy to output path so the original is preserved
+ shutil.copy2(input_path, output_path)
+ client = SuperDocClient()
+ client.connect()
+ client.doc.open({"doc": output_path})
+
+ # 2. Get tools in generic format and wrap as LangChain tools
+ # Use mode="all" — no discover_tools since the framework manages a fixed tool set
+ result = choose_tools({"provider": "generic", "mode": "all"})
+ tools = [make_superdoc_tool(client, t) for t in result["tools"]]
+
+ # 3. Create a ReAct agent
+ model = ChatOpenAI(model="gpt-4o")
+ agent = create_react_agent(
+ model=model,
+ tools=tools,
+ prompt="You edit .docx files using SuperDoc tools. Use tracked changes for all edits.",
+ )
+
+ # 4. Run the agent
+ result = agent.invoke(
+ {"messages": [HumanMessage(content="Review this contract. Fix vague language and one-sided terms.")]}
+ )
+
+ last_message = result["messages"][-1]
+ print(last_message.content)
+
+ # 5. Save (in-place to the copy)
+ client.doc.save()
+ client.dispose()
+ print(f"\nSaved to {output_path}")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/ai/langchain/index.ts b/examples/ai/langchain/index.ts
new file mode 100644
index 0000000000..58363bbb8f
--- /dev/null
+++ b/examples/ai/langchain/index.ts
@@ -0,0 +1,77 @@
+/**
+ * SuperDoc + LangChain
+ *
+ * Minimal agentic loop: any LangChain-compatible model uses SuperDoc tools
+ * to review and edit a Word document.
+ *
+ * Usage: OPENAI_API_KEY=sk-... npx tsx index.ts [input.docx] [output.docx]
+ *
+ * Requires: OPENAI_API_KEY (or swap ChatOpenAI for ChatAnthropic, ChatGoogleGenerativeAI, etc.)
+ */
+
+import path from 'node:path';
+import { copyFileSync } from 'node:fs';
+import { ChatOpenAI } from '@langchain/openai';
+import { DynamicStructuredTool } from '@langchain/core/tools';
+import { createReactAgent } from '@langchain/langgraph/prebuilt';
+import { HumanMessage } from '@langchain/core/messages';
+import { z } from 'zod';
+import {
+ createSuperDocClient,
+ chooseTools,
+ dispatchSuperDocTool,
+} from '@superdoc-dev/sdk';
+
+async function main() {
+ const [rawInput = 'contract.docx', rawOutput = 'reviewed.docx'] = process.argv.slice(2);
+ const inputPath = path.resolve(rawInput);
+ const outputPath = path.resolve(rawOutput);
+
+ // 1. Connect to SuperDoc — copy to output path so the original is preserved
+ copyFileSync(inputPath, outputPath);
+ const client = createSuperDocClient();
+ await client.connect();
+ await client.doc.open({ doc: outputPath });
+
+ // 2. Get tools in generic format and wrap as LangChain tools (all tools — no discover_tools since the framework manages a fixed tool set)
+ const { tools: sdTools } = await chooseTools({ provider: 'generic', mode: 'all' });
+
+ const langchainTools = (
+ sdTools as Array<{ name: string; description: string; parameters: Record<string, unknown> }>
+ ).map(
+ (t) =>
+ new DynamicStructuredTool({
+ name: t.name,
+ description: t.description,
+ schema: z.object({}).passthrough(), // Accept any params — SuperDoc SDK validates
+ func: async (args) => {
+ console.log(` Tool: ${t.name}`);
+ const result = await dispatchSuperDocTool(client, t.name, args as Record<string, unknown>);
+ return JSON.stringify(result);
+ },
+ }),
+ );
+
+ // 3. Create a ReAct agent
+ const model = new ChatOpenAI({ model: 'gpt-4o' });
+ const agent = createReactAgent({
+ llm: model,
+ tools: langchainTools,
+ prompt: 'You edit .docx files using SuperDoc tools. Use tracked changes for all edits.',
+ });
+
+ // 4. Run the agent
+ const result = await agent.invoke({
+ messages: [new HumanMessage('Review this contract. Fix vague language and one-sided terms.')],
+ });
+
+ const lastMessage = result.messages[result.messages.length - 1];
+ console.log(lastMessage.content);
+
+ // 5. Save (in-place to the copy)
+ await client.doc.save();
+ await client.dispose();
+ console.log(`\nSaved to ${outputPath}`);
+}
+
+main().catch((e) => { console.error(e.message); process.exit(1); });
diff --git a/examples/ai/langchain/package.json b/examples/ai/langchain/package.json
new file mode 100644
index 0000000000..af11d98746
--- /dev/null
+++ b/examples/ai/langchain/package.json
@@ -0,0 +1,18 @@
+{
+ "name": "superdoc-langchain",
+ "private": true,
+ "type": "module",
+ "scripts": {
+ "start": "tsx index.ts"
+ },
+ "dependencies": {
+ "@langchain/openai": "^0.4.0",
+ "@langchain/core": "^0.3.0",
+ "@langchain/langgraph": "^0.2.0",
+ "@superdoc-dev/sdk": "latest",
+ "zod": "^3.23.0"
+ },
+ "devDependencies": {
+ "tsx": "^4.21.0"
+ }
+}
diff --git a/examples/ai/vercel-ai/README.md b/examples/ai/vercel-ai/README.md
new file mode 100644
index 0000000000..c7ffcded76
--- /dev/null
+++ b/examples/ai/vercel-ai/README.md
@@ -0,0 +1,41 @@
+# SuperDoc + Vercel AI SDK
+
+Agentic document editing using the Vercel AI SDK. The cleanest integration — `generateText` handles the agentic loop automatically.
+
+**Docs:** [Integrations](https://docs.superdoc.dev/document-engine/ai-agents/integrations)
+
+## Prerequisites
+
+- `OPENAI_API_KEY` environment variable (or swap the provider)
+
+## Run
+
+```bash
+npm install
+OPENAI_API_KEY=sk-... npx tsx index.ts contract.docx reviewed.docx
+```
+
+## Configuration
+
+The example uses OpenAI by default. Swap the provider import to use any model Vercel AI supports:
+
+```typescript
+// OpenAI (default)
+import { openai } from '@ai-sdk/openai';
+model: openai('gpt-4o')
+
+// Anthropic
+import { anthropic } from '@ai-sdk/anthropic';
+model: anthropic('claude-sonnet-4-6-20250725')
+
+// Google
+import { google } from '@ai-sdk/google';
+model: google('gemini-2.5-pro')
+```
+
+## How it works
+
+1. Connects to SuperDoc via the SDK
+2. Loads tool definitions in Vercel format and wraps them as `tool()` objects
+3. Calls `generateText` with `maxSteps: 20` — the SDK handles the tool call loop
+4. Saves the reviewed document
diff --git a/examples/ai/vercel-ai/contract.docx b/examples/ai/vercel-ai/contract.docx
new file mode 100644
index 0000000000..5b911d3399
Binary files /dev/null and b/examples/ai/vercel-ai/contract.docx differ
diff --git a/examples/ai/vercel-ai/index.ts b/examples/ai/vercel-ai/index.ts
new file mode 100644
index 0000000000..203cc0854f
--- /dev/null
+++ b/examples/ai/vercel-ai/index.ts
@@ -0,0 +1,69 @@
+/**
+ * SuperDoc + Vercel AI SDK
+ *
+ * Minimal agentic loop: any model via the Vercel AI SDK uses SuperDoc tools
+ * to review and edit a Word document.
+ *
+ * Usage: OPENAI_API_KEY=sk-... npx tsx index.ts [input.docx] [output.docx]
+ *
+ * Requires: OPENAI_API_KEY (or swap the provider — Vercel AI supports
+ * Anthropic, Google, Mistral, and others with the same interface).
+ */
+
+import path from 'node:path';
+import { copyFileSync } from 'node:fs';
+import { generateText, tool } from 'ai';
+import { openai } from '@ai-sdk/openai';
+import {
+ createSuperDocClient,
+ chooseTools,
+ dispatchSuperDocTool,
+} from '@superdoc-dev/sdk';
+import { z } from 'zod';
+
+async function main() {
+ const [rawInput = 'contract.docx', rawOutput = 'reviewed.docx'] = process.argv.slice(2);
+ const inputPath = path.resolve(rawInput);
+ const outputPath = path.resolve(rawOutput);
+
+ // 1. Connect to SuperDoc — copy to output path so the original is preserved
+ copyFileSync(inputPath, outputPath);
+ const client = createSuperDocClient();
+ await client.connect();
+ await client.doc.open({ doc: outputPath });
+
+ // 2. Get tools in Vercel AI format (all tools — no discover_tools since the framework manages a fixed tool set)
+ const { tools: sdTools } = await chooseTools({ provider: 'vercel', mode: 'all' });
+
+ // Convert SuperDoc tool definitions to Vercel AI `tool()` objects
+ const vercelTools: Record<string, ReturnType<typeof tool>> = {};
+ for (const t of sdTools as Array<{ type: string; function: { name: string; description: string; parameters: Record<string, unknown> } }>) {
+ const fn = t.function;
+ vercelTools[fn.name] = tool({
+ description: fn.description,
+ parameters: z.object({}).passthrough(), // Accept any params — SuperDoc SDK validates
+ execute: async (args) => {
+ console.log(` Tool: ${fn.name}`);
+ return dispatchSuperDocTool(client, fn.name, args as Record<string, unknown>);
+ },
+ });
+ }
+
+ // 3. Run with generateText — handles the agentic loop automatically
+ const result = await generateText({
+ model: openai('gpt-4o'),
+ system: 'You edit .docx files using SuperDoc tools. Use tracked changes for all edits.',
+ prompt: 'Review this contract. Fix vague language and one-sided terms.',
+ tools: vercelTools,
+ maxSteps: 20,
+ });
+
+ console.log(result.text);
+
+ // 4. Save (in-place to the copy)
+ await client.doc.save();
+ await client.dispose();
+ console.log(`\nSaved to ${outputPath}`);
+}
+
+main().catch((e) => { console.error(e.message); process.exit(1); });
diff --git a/examples/ai/vercel-ai/package.json b/examples/ai/vercel-ai/package.json
new file mode 100644
index 0000000000..b66706966f
--- /dev/null
+++ b/examples/ai/vercel-ai/package.json
@@ -0,0 +1,17 @@
+{
+ "name": "superdoc-vercel-ai",
+ "private": true,
+ "type": "module",
+ "scripts": {
+ "start": "tsx index.ts"
+ },
+ "dependencies": {
+ "ai": "^4.0.0",
+ "@ai-sdk/openai": "^1.0.0",
+ "@superdoc-dev/sdk": "latest",
+ "zod": "^3.23.0"
+ },
+ "devDependencies": {
+ "tsx": "^4.21.0"
+ }
+}
diff --git a/examples/ai/vertex/README.md b/examples/ai/vertex/README.md
new file mode 100644
index 0000000000..53799e73b4
--- /dev/null
+++ b/examples/ai/vertex/README.md
@@ -0,0 +1,43 @@
+# SuperDoc + Google Vertex AI
+
+Agentic document editing using Gemini on Vertex AI.
+
+**Docs:** [Integrations](https://docs.superdoc.dev/document-engine/ai-agents/integrations)
+
+## Prerequisites
+
+- Google Cloud credentials (`gcloud auth application-default login` or a service account key)
+- A Google Cloud project with Vertex AI API enabled
+
+## Run
+
+### Node.js
+
+```bash
+npm install
+GOOGLE_CLOUD_PROJECT=your-project npx tsx index.ts contract.docx reviewed.docx
+```
+
+### Python
+
+```bash
+python -m venv venv && source venv/bin/activate
+pip install superdoc-sdk google-cloud-aiplatform
+GOOGLE_CLOUD_PROJECT=your-project python index.py contract.docx reviewed.docx
+```
+
+## Configuration
+
+| Variable | Default | Description |
+|----------|---------|-------------|
+| `GOOGLE_CLOUD_PROJECT` | `your-project-id` | Google Cloud project ID |
+| `GOOGLE_CLOUD_LOCATION` | `us-central1` | Vertex AI region |
+| `VERTEX_MODEL` | `gemini-2.5-pro` | Any Gemini model that supports function calling |
+
+## How it works
+
+1. Connects to SuperDoc via the SDK
+2. Loads tool definitions in generic format and converts to Vertex `functionDeclarations`
+3. Starts a chat with Gemini
+4. Runs an agentic loop: the model calls SuperDoc tools to read, query, and edit the document
+5. Saves the reviewed document
diff --git a/examples/ai/vertex/contract.docx b/examples/ai/vertex/contract.docx
new file mode 100644
index 0000000000..5b911d3399
Binary files /dev/null and b/examples/ai/vertex/contract.docx differ
diff --git a/examples/ai/vertex/index.py b/examples/ai/vertex/index.py
new file mode 100644
index 0000000000..114441c0d5
--- /dev/null
+++ b/examples/ai/vertex/index.py
@@ -0,0 +1,122 @@
+"""
+SuperDoc + Google Vertex AI
+
+Minimal agentic loop: Gemini on Vertex AI uses SuperDoc tools
+to review and edit a Word document.
+
+Usage: python index.py [input.docx] [output.docx]
+
+Requires:
+ - pip install superdoc-sdk google-cloud-aiplatform
+ - Google Cloud credentials (gcloud auth application-default login)
+"""
+
+import sys
+import os
+import shutil
+from pathlib import Path
+from vertexai.generative_models import GenerativeModel, Tool, FunctionDeclaration, Part
+import vertexai
+from superdoc import SuperDocClient, choose_tools, dispatch_superdoc_tool, sanitize_tool_schemas
+
+PROJECT = os.environ.get("GOOGLE_CLOUD_PROJECT", "your-project-id")
+LOCATION = os.environ.get("GOOGLE_CLOUD_LOCATION", "us-central1")
+MODEL = os.environ.get("VERTEX_MODEL", "gemini-2.5-pro")
+
+
+def to_vertex_tools(generic_tools):
+ """Convert SuperDoc generic-format tools to Vertex AI function declarations."""
+ sanitized = sanitize_tool_schemas(generic_tools, "vertex")
+ declarations = [
+ FunctionDeclaration(
+ name=t["name"],
+ description=t["description"],
+ parameters=t["parameters"],
+ )
+ for t in sanitized
+ ]
+ return [Tool(function_declarations=declarations)]
+
+
+def main():
+ args = sys.argv[1:]
+ input_path = str(Path(args[0] if args else "contract.docx").resolve())
+ output_path = str(Path(args[1] if len(args) > 1 else "reviewed.docx").resolve())
+
+ # 1. Connect to SuperDoc — copy to output path so the original is preserved
+ shutil.copy2(input_path, output_path)
+ client = SuperDocClient()
+ client.connect()
+ client.doc.open({"doc": output_path})
+
+ # 2. Get tools in generic format and convert to Vertex shape
+ result = choose_tools({"provider": "generic"})
+ vertex_tools = to_vertex_tools(result["tools"])
+
+ # 3. Set up Vertex AI
+ vertexai.init(project=PROJECT, location=LOCATION)
+ model = GenerativeModel(
+ MODEL,
+ tools=vertex_tools,
+ system_instruction="You edit .docx files using SuperDoc tools. Use tracked changes for all edits.",
+ )
+ chat = model.start_chat()
+
+ # 4. Agentic loop
+ response = chat.send_message("Review this contract. Fix vague language and one-sided terms.")
+
+ for _ in range(20):
+ function_calls = [
+ part for part in response.candidates[0].content.parts if part.function_call.name
+ ]
+
+ if not function_calls:
+ # Print final response
+ for part in response.candidates[0].content.parts:
+ if part.text:
+ print(part.text)
+ break
+
+ function_responses = []
+ for part in function_calls:
+ name = part.function_call.name
+ args = dict(part.function_call.args) if part.function_call.args else {}
+ print(f" Tool: {name}")
+
+ try:
+ if name == "discover_tools":
+ # discover_tools is a meta-tool — handle client-side via choose_tools
+ groups = args.get("groups")
+ discovered = choose_tools({"provider": "generic", "groups": groups})
+ new_tools = discovered.get("tools", [])
+ sanitized = sanitize_tool_schemas(new_tools, "vertex")
+ for t in sanitized:
+ vertex_tools[0].function_declarations.append(
+ FunctionDeclaration(
+ name=t["name"],
+ description=t["description"],
+ parameters=t["parameters"],
+ )
+ )
+ result = discovered
+ else:
+ result = dispatch_superdoc_tool(client, name, args)
+
+ function_responses.append(
+ Part.from_function_response(name=name, response=result)
+ )
+ except Exception as e:
+ function_responses.append(
+ Part.from_function_response(name=name, response={"error": str(e)})
+ )
+
+ response = chat.send_message(function_responses)
+
+ # 5. Save (in-place to the copy)
+ client.doc.save()
+ client.dispose()
+ print(f"\nSaved to {output_path}")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/ai/vertex/index.ts b/examples/ai/vertex/index.ts
new file mode 100644
index 0000000000..4495cda794
--- /dev/null
+++ b/examples/ai/vertex/index.ts
@@ -0,0 +1,119 @@
+/**
+ * SuperDoc + Google Vertex AI
+ *
+ * Minimal agentic loop: Gemini on Vertex AI uses SuperDoc tools
+ * to review and edit a Word document.
+ *
+ * Usage: npx tsx index.ts [input.docx] [output.docx]
+ *
+ * Requires: Google Cloud credentials configured (gcloud auth application-default login).
+ */
+
+import path from 'node:path';
+import { copyFileSync } from 'node:fs';
+import {
+ VertexAI,
+ type FunctionDeclaration,
+ type Tool as VertexTool,
+ type Part,
+} from '@google-cloud/vertexai';
+import {
+ createSuperDocClient,
+ chooseTools,
+ dispatchSuperDocTool,
+ sanitizeToolSchemas,
+ mergeDiscoveredTools,
+ type ToolGroup,
+} from '@superdoc-dev/sdk';
+
+const PROJECT = process.env.GOOGLE_CLOUD_PROJECT ?? 'your-project-id';
+const LOCATION = process.env.GOOGLE_CLOUD_LOCATION ?? 'us-central1';
+const MODEL = process.env.VERTEX_MODEL ?? 'gemini-2.5-pro';
+
+async function main() {
+ const [rawInput = 'contract.docx', rawOutput = 'reviewed.docx'] = process.argv.slice(2);
+ const inputPath = path.resolve(rawInput);
+ const outputPath = path.resolve(rawOutput);
+
+ // 1. Connect to SuperDoc — copy to output path so the original is preserved
+ copyFileSync(inputPath, outputPath);
+ const client = createSuperDocClient();
+ await client.connect();
+ await client.doc.open({ doc: outputPath });
+
+ // 2. Get tools in generic format, sanitize for Vertex, and build declarations
+ const { tools: sdTools } = await chooseTools({ provider: 'generic' });
+ const sanitized = sanitizeToolSchemas(sdTools, 'vertex') as Array<{ name: string; description: string; parameters: Record<string, unknown> }>;
+ const vertexTools: VertexTool[] = [{
+ functionDeclarations: sanitized.map((t): FunctionDeclaration => ({
+ name: t.name,
+ description: t.description,
+ parameters: t.parameters as FunctionDeclaration['parameters'],
+ })),
+ }];
+
+ // 3. Set up Vertex AI
+ const vertexAI = new VertexAI({ project: PROJECT, location: LOCATION });
+ const model = vertexAI.getGenerativeModel({
+ model: MODEL,
+ tools: vertexTools,
+ systemInstruction: { role: 'system', parts: [{ text: 'You edit .docx files using SuperDoc tools. Use tracked changes for all edits.' }] },
+ });
+
+ const chat = model.startChat();
+
+ // 4. Agentic loop
+ let response = await chat.sendMessage([
+ { text: 'Review this contract. Fix vague language and one-sided terms.' },
+ ]);
+
+ for (let turn = 0; turn < 20; turn++) {
+ const candidate = response.response.candidates?.[0];
+ if (!candidate) break;
+
+ const functionCalls = candidate.content.parts.filter((p) => p.functionCall);
+ if (!functionCalls.length) {
+ // Print final response
+ for (const part of candidate.content.parts) {
+ if (part.text) console.log(part.text);
+ }
+ break;
+ }
+
+ const functionResponses: Part[] = [];
+ for (const part of functionCalls) {
+ const { name, args } = part.functionCall!;
+ console.log(` Tool: ${name}`);
+ try {
+ let result: unknown;
+
+ if (name === 'discover_tools') {
+ // discover_tools is a meta-tool — handle client-side via chooseTools
+ const groups = ((args ?? {}) as Record<string, unknown>).groups as ToolGroup[] | undefined;
+ const discovered = await chooseTools({ provider: 'generic', groups });
+ mergeDiscoveredTools(vertexTools, discovered, { provider: 'generic', target: 'vertex' });
+ result = discovered;
+ } else {
+ result = await dispatchSuperDocTool(client, name, (args ?? {}) as Record<string, unknown>);
+ }
+
+ functionResponses.push({
+ functionResponse: { name, response: result as object },
+ });
+ } catch (err) {
+ functionResponses.push({
+ functionResponse: { name, response: { error: (err as Error).message } },
+ });
+ }
+ }
+
+ response = await chat.sendMessage(functionResponses);
+ }
+
+ // 5. Save (in-place to the copy)
+ await client.doc.save();
+ await client.dispose();
+ console.log(`\nSaved to ${outputPath}`);
+}
+
+main().catch((e) => { console.error(e.message); process.exit(1); });
diff --git a/examples/ai/vertex/package.json b/examples/ai/vertex/package.json
new file mode 100644
index 0000000000..d6e705ba51
--- /dev/null
+++ b/examples/ai/vertex/package.json
@@ -0,0 +1,15 @@
+{
+ "name": "superdoc-vertex",
+ "private": true,
+ "type": "module",
+ "scripts": {
+ "start": "tsx index.ts"
+ },
+ "dependencies": {
+ "@google-cloud/vertexai": "^1.9.0",
+ "@superdoc-dev/sdk": "latest"
+ },
+ "devDependencies": {
+ "tsx": "^4.21.0"
+ }
+}
diff --git a/packages/sdk/langs/node/src/helpers/__tests__/platform.test.ts b/packages/sdk/langs/node/src/helpers/__tests__/platform.test.ts
new file mode 100644
index 0000000000..f1867ea12b
--- /dev/null
+++ b/packages/sdk/langs/node/src/helpers/__tests__/platform.test.ts
@@ -0,0 +1,271 @@
+import { describe, expect, test } from 'bun:test';
+import { sanitizeToolSchemas, formatToolResult, formatToolError, mergeDiscoveredTools } from '../platform.js';
+
+/* ------------------------------------------------------------------ */
+/* sanitizeToolSchemas */
+/* ------------------------------------------------------------------ */
+
+describe('sanitizeToolSchemas', () => {
+ test('strips const keyword for vertex', () => {
+ const tools = [
+ {
+ name: 'query_match',
+ parameters: {
+ type: 'object',
+ properties: {
+ matchKind: { const: 'text' },
+ query: { type: 'string' },
+ },
+ },
+ },
+ ];
+
+ const result = sanitizeToolSchemas(tools, 'vertex');
+
+ expect(result[0].parameters.properties.matchKind).toEqual({});
+ expect(result[0].parameters.properties.query).toEqual({ type: 'string' });
+ });
+
+ test('strips const recursively in nested schemas', () => {
+ const tools = [
+ {
+ name: 'test',
+ parameters: {
+ oneOf: [
+ { properties: { kind: { const: 'a' }, value: { type: 'string' } } },
+ { properties: { kind: { const: 'b' }, value: { type: 'number' } } },
+ ],
+ },
+ },
+ ];
+
+ const result = sanitizeToolSchemas(tools, 'vertex');
+
+ expect(result[0].parameters.oneOf[0].properties.kind).toEqual({});
+ expect(result[0].parameters.oneOf[1].properties.kind).toEqual({});
+ expect(result[0].parameters.oneOf[0].properties.value).toEqual({ type: 'string' });
+ });
+
+ test('does not mutate original tools', () => {
+ const tools = [{ name: 'test', parameters: { properties: { x: { const: 'a' } } } }];
+ const original = JSON.stringify(tools);
+
+ sanitizeToolSchemas(tools, 'vertex');
+
+ expect(JSON.stringify(tools)).toBe(original);
+ });
+
+ test('is a no-op for bedrock', () => {
+ const tools = [{ name: 'test', parameters: { properties: { x: { const: 'a' } } } }];
+ const result = sanitizeToolSchemas(tools, 'bedrock');
+ expect(result).toBe(tools); // same reference — no cloning needed
+ });
+
+ test('handles empty array', () => {
+ expect(sanitizeToolSchemas([], 'vertex')).toEqual([]);
+ });
+});
+
+/* ------------------------------------------------------------------ */
+/* formatToolResult */
+/* ------------------------------------------------------------------ */
+
+describe('formatToolResult', () => {
+ describe('bedrock', () => {
+ test('wraps object result in toolResult shape', () => {
+ const result = formatToolResult({ text: 'hello' }, { target: 'bedrock', toolUseId: 'tu-1' });
+ expect(result).toEqual({
+ toolResult: { toolUseId: 'tu-1', content: [{ json: { text: 'hello' } }] },
+ });
+ });
+
+ test('wraps array result in { result } wrapper', () => {
+ const result = formatToolResult([1, 2, 3], { target: 'bedrock', toolUseId: 'tu-1' });
+ expect(result).toEqual({
+ toolResult: { toolUseId: 'tu-1', content: [{ json: { result: [1, 2, 3] } }] },
+ });
+ });
+
+ test('wraps string result in { result } wrapper', () => {
+ const result = formatToolResult('hello', { target: 'bedrock', toolUseId: 'tu-1' });
+ expect(result).toEqual({
+ toolResult: { toolUseId: 'tu-1', content: [{ json: { result: 'hello' } }] },
+ });
+ });
+
+ test('wraps null result in { result } wrapper', () => {
+ const result = formatToolResult(null, { target: 'bedrock', toolUseId: 'tu-1' });
+ expect(result).toEqual({
+ toolResult: { toolUseId: 'tu-1', content: [{ json: { result: null } }] },
+ });
+ });
+ });
+
+ describe('vertex', () => {
+ test('wraps in functionResponse shape', () => {
+ const result = formatToolResult({ data: 1 }, { target: 'vertex', name: 'get_text' });
+ expect(result).toEqual({
+ functionResponse: { name: 'get_text', response: { data: 1 } },
+ });
+ });
+ });
+
+ describe('anthropic', () => {
+ test('wraps in tool_result shape', () => {
+ const result = formatToolResult({ ok: true }, { target: 'anthropic', toolUseId: 'tu-1' });
+ expect(result).toEqual({
+ type: 'tool_result',
+ tool_use_id: 'tu-1',
+ content: '{"ok":true}',
+ });
+ });
+ });
+
+ describe('openai', () => {
+ test('wraps in tool role message', () => {
+ const result = formatToolResult({ ok: true }, { target: 'openai', toolUseId: 'call-1', name: 'fn' });
+ expect(result).toEqual({
+ role: 'tool',
+ tool_call_id: 'call-1',
+ content: '{"ok":true}',
+ });
+ });
+ });
+});
+
+/* ------------------------------------------------------------------ */
+/* formatToolError */
+/* ------------------------------------------------------------------ */
+
+describe('formatToolError', () => {
+ test('bedrock error shape', () => {
+ const result = formatToolError(new Error('boom'), { target: 'bedrock', toolUseId: 'tu-1' });
+ expect(result).toEqual({
+ toolResult: { toolUseId: 'tu-1', content: [{ text: 'Error: boom' }], status: 'error' },
+ });
+ });
+
+ test('vertex error shape', () => {
+ const result = formatToolError('fail', { target: 'vertex', name: 'fn' });
+ expect(result).toEqual({
+ functionResponse: { name: 'fn', response: { error: 'fail' } },
+ });
+ });
+
+ test('anthropic error shape', () => {
+ const result = formatToolError(new Error('nope'), { target: 'anthropic', toolUseId: 'tu-1' });
+ expect(result).toEqual({
+ type: 'tool_result',
+ tool_use_id: 'tu-1',
+ content: 'Error: nope',
+ is_error: true,
+ });
+ });
+
+ test('openai error shape', () => {
+ const result = formatToolError(new Error('bad'), { target: 'openai', toolUseId: 'call-1' });
+ expect(result).toEqual({
+ role: 'tool',
+ tool_call_id: 'call-1',
+ content: 'Error: bad',
+ });
+ });
+});
+
+/* ------------------------------------------------------------------ */
+/* mergeDiscoveredTools */
+/* ------------------------------------------------------------------ */
+
+describe('mergeDiscoveredTools', () => {
+ const anthropicTools = [
+ { name: 'add_comment', description: 'Add a comment', input_schema: { type: 'object' } },
+ { name: 'format_bold', description: 'Bold text', input_schema: { type: 'object' } },
+ ];
+
+ const genericTools = [
+ {
+ name: 'add_comment',
+ description: 'Add a comment',
+ parameters: { type: 'object', properties: { kind: { const: 'inline' } } },
+ },
+ { name: 'format_bold', description: 'Bold text', parameters: { type: 'object' } },
+ ];
+
+ describe('bedrock target', () => {
+ test('merges tools into bedrock toolConfig', () => {
+ const toolConfig = { tools: [{ toolSpec: { name: 'existing', description: 'x', inputSchema: { json: {} } } }] };
+ const result = { tools: anthropicTools };
+
+ const count = mergeDiscoveredTools(toolConfig, result, { provider: 'anthropic', target: 'bedrock' });
+
+ expect(count).toBe(2);
+ expect(toolConfig.tools).toHaveLength(3);
+ expect(toolConfig.tools[1]).toEqual({
+ toolSpec: {
+ name: 'add_comment',
+ description: 'Add a comment',
+ inputSchema: { json: { type: 'object' } },
+ },
+ });
+ });
+
+ test('skips duplicate tools', () => {
+ const toolConfig = {
+ tools: [{ toolSpec: { name: 'add_comment', description: 'x', inputSchema: { json: {} } } }],
+ };
+ const result = { tools: anthropicTools };
+
+ const count = mergeDiscoveredTools(toolConfig, result, { provider: 'anthropic', target: 'bedrock' });
+
+ expect(count).toBe(1); // only format_bold added, add_comment skipped
+ expect(toolConfig.tools).toHaveLength(2);
+ });
+ });
+
+ describe('vertex target', () => {
+ test('merges tools and sanitizes schemas', () => {
+ const toolConfig = [{ functionDeclarations: [{ name: 'existing', description: 'x', parameters: {} }] }];
+ const result = { tools: genericTools };
+
+ const count = mergeDiscoveredTools(toolConfig, result, { provider: 'generic', target: 'vertex' });
+
+ expect(count).toBe(2);
+ expect(toolConfig[0].functionDeclarations).toHaveLength(3);
+ // const keyword should be stripped
+ const addComment = toolConfig[0].functionDeclarations[1] as Record<string, unknown>;
+ expect(JSON.stringify(addComment)).not.toContain('"const"');
+ });
+ });
+
+ describe('direct provider (no target)', () => {
+ test('merges into plain array', () => {
+ const toolConfig = [{ name: 'existing', description: 'x', input_schema: {} }] as unknown[];
+ const result = { tools: anthropicTools };
+
+ const count = mergeDiscoveredTools(toolConfig, result, { provider: 'anthropic' });
+
+ expect(count).toBe(2);
+ expect(toolConfig).toHaveLength(3);
+ });
+ });
+
+ describe('edge cases', () => {
+ test('returns 0 for empty discover result', () => {
+ const toolConfig = { tools: [] };
+ const count = mergeDiscoveredTools(toolConfig, {}, { provider: 'anthropic', target: 'bedrock' });
+ expect(count).toBe(0);
+ });
+
+ test('returns 0 for non-object discover result', () => {
+ const toolConfig = { tools: [] };
+ const count = mergeDiscoveredTools(toolConfig, 'not an object', { provider: 'anthropic', target: 'bedrock' });
+ expect(count).toBe(0);
+ });
+
+ test('returns 0 for null discover result', () => {
+ const toolConfig = { tools: [] };
+ const count = mergeDiscoveredTools(toolConfig, null, { provider: 'anthropic', target: 'bedrock' });
+ expect(count).toBe(0);
+ });
+ });
+});
diff --git a/packages/sdk/langs/node/src/helpers/platform.ts b/packages/sdk/langs/node/src/helpers/platform.ts
new file mode 100644
index 0000000000..1d8ecad951
--- /dev/null
+++ b/packages/sdk/langs/node/src/helpers/platform.ts
@@ -0,0 +1,322 @@
+/**
+ * Platform helper methods for the Node SDK.
+ *
+ * These are hand-written convenience wrappers that handle platform-specific
+ * quirks when integrating SuperDoc tools with cloud AI platforms (Bedrock,
+ * Vertex AI) and direct APIs (OpenAI, Anthropic). They are NOT generated
+ * from the contract and will not be overwritten by `pnpm run generate:all`.
+ *
+ * Usage:
+ * ```ts
+ * import { chooseTools, dispatchSuperDocTool } from '@superdoc-dev/sdk';
+ * import { sanitizeToolSchemas, formatToolResult, mergeDiscoveredTools } from '@superdoc-dev/sdk/helpers/platform';
+ *
+ * // Vertex AI: strip unsupported JSON Schema keywords
+ * const { tools } = await chooseTools({ provider: 'generic' });
+ * const sanitized = sanitizeToolSchemas(tools, 'vertex');
+ *
+ * // Bedrock: format tool results in platform-native shape
+ * const result = await dispatchSuperDocTool(client, name, args);
+ * const formatted = formatToolResult(result, { target: 'bedrock', toolUseId });
+ *
+ * // Merge discover_tools output into platform-native config
+ * mergeDiscoveredTools(toolConfig, discoverResult, { provider: 'anthropic', target: 'bedrock' });
+ * ```
+ */
+
+import type { ToolProvider } from '../tools.js';
+
+/* ------------------------------------------------------------------ */
+/* Types */
+/* ------------------------------------------------------------------ */
+
+/** Cloud platforms that need schema or result sanitization. */
+export type PlatformTarget = 'bedrock' | 'vertex';
+
+/** All targets that `formatToolResult` supports. */
+export type ResultTarget = 'bedrock' | 'vertex' | 'anthropic' | 'openai';
+
+export interface FormatToolResultOptions {
+ target: ResultTarget;
+ /** Required for bedrock, anthropic, openai. */
+ toolUseId?: string;
+ /** Required for vertex, openai. */
+ name?: string;
+}
+
+export interface MergeDiscoveredToolsOptions {
+ provider: ToolProvider;
+ target?: PlatformTarget;
+}
+
+/* ------------------------------------------------------------------ */
+/* sanitizeToolSchemas */
+/* ------------------------------------------------------------------ */
+
+/**
+ * JSON Schema keywords unsupported by each platform.
+ * Extend this map when new platform incompatibilities are discovered.
+ */
+const UNSUPPORTED_KEYWORDS: Record<PlatformTarget, Set<string>> = {
+ vertex: new Set(['const']),
+ bedrock: new Set(), // no-op currently — future-proof
+};
+
+/**
+ * Recursively strip JSON Schema keywords that the target platform doesn't support.
+ *
+ * Returns a new array — the original tools are not mutated.
+ */
+export function sanitizeToolSchemas<T>(tools: T[], target: PlatformTarget): T[] {
+ const blocked = UNSUPPORTED_KEYWORDS[target];
+ if (!blocked || blocked.size === 0) return tools;
+ return tools.map((t) => deepStripKeys(t, blocked) as T);
+}
+
+function deepStripKeys(obj: unknown, blocked: Set<string>): unknown {
+ if (Array.isArray(obj)) return obj.map((item) => deepStripKeys(item, blocked));
+ if (typeof obj !== 'object' || obj === null) return obj;
+
+ const result: Record<string, unknown> = {};
+ for (const [key, value] of Object.entries(obj as Record<string, unknown>)) {
+ if (blocked.has(key)) continue;
+ result[key] = deepStripKeys(value, blocked);
+ }
+ return result;
+}
+
+/* ------------------------------------------------------------------ */
+/* formatToolResult */
+/* ------------------------------------------------------------------ */
+
+/**
+ * Wrap a raw `dispatchSuperDocTool` result in the platform-native shape
+ * expected by each provider's conversation API.
+ */
+export function formatToolResult(result: unknown, options: FormatToolResultOptions): unknown {
+ const { target, toolUseId, name } = options;
+
+ switch (target) {
+ case 'bedrock': {
+ // Bedrock requires json content to be a plain object (not array or primitive)
+ const json = typeof result === 'object' && result !== null && !Array.isArray(result) ? result : { result };
+ return { toolResult: { toolUseId, content: [{ json }] } };
+ }
+
+ case 'vertex':
+ return { functionResponse: { name, response: result } };
+
+ case 'anthropic':
+ return {
+ type: 'tool_result',
+ tool_use_id: toolUseId,
+ content: JSON.stringify(result),
+ };
+
+ case 'openai':
+ return {
+ role: 'tool',
+ tool_call_id: toolUseId,
+ content: JSON.stringify(result),
+ };
+
+ default:
+ return result;
+ }
+}
+
+/**
+ * Format an error from a failed tool call in the platform-native error shape.
+ */
+export function formatToolError(error: unknown, options: FormatToolResultOptions): unknown {
+ const { target, toolUseId, name } = options;
+ const message = error instanceof Error ? error.message : String(error);
+
+ switch (target) {
+ case 'bedrock':
+ return {
+ toolResult: {
+ toolUseId,
+ content: [{ text: `Error: ${message}` }],
+ status: 'error',
+ },
+ };
+
+ case 'vertex':
+ return { functionResponse: { name, response: { error: message } } };
+
+ case 'anthropic':
+ return {
+ type: 'tool_result',
+ tool_use_id: toolUseId,
+ content: `Error: ${message}`,
+ is_error: true,
+ };
+
+ case 'openai':
+ return {
+ role: 'tool',
+ tool_call_id: toolUseId,
+ content: `Error: ${message}`,
+ };
+
+ default:
+ return { error: message };
+ }
+}
+
+/* ------------------------------------------------------------------ */
+/* mergeDiscoveredTools */
+/* ------------------------------------------------------------------ */
+
+/**
+ * Extract newly discovered tools from a `discover_tools` result, convert them
+ * to the provider's native format, apply platform sanitization, and merge them
+ * into an existing tool configuration object.
+ *
+ * Mutates `toolConfig` in place. Returns the number of new tools added.
+ *
+ * Supported toolConfig shapes:
+ * - **bedrock**: `{ tools: [{ toolSpec: { name, description, inputSchema } }] }`
+ * - **vertex**: `[{ functionDeclarations: [...] }]`
+ * - **openai/anthropic/vercel/generic**: `unknown[]` (array of tool objects)
+ */
+export function mergeDiscoveredTools(
+ toolConfig: unknown,
+ discoverResult: unknown,
+ options: MergeDiscoveredToolsOptions,
+): number {
+ const newTools = extractDiscoveredTools(discoverResult);
+ if (newTools.length === 0) return 0;
+
+ const { provider, target } = options;
+
+ // Collect existing tool names to avoid duplicates
+ const existingNames = collectExistingNames(toolConfig, target);
+
+ let added = 0;
+
+ for (const tool of newTools) {
+ const name = extractToolName(tool, provider);
+ if (!name || existingNames.has(name)) continue;
+ existingNames.add(name);
+
+ const formatted = formatToolForConfig(tool, provider, target);
+ pushToConfig(toolConfig, formatted, target);
+ added++;
+ }
+
+ return added;
+}
+
+/** Pull the `tools` array from a discover_tools result. */
+function extractDiscoveredTools(result: unknown): unknown[] {
+ if (typeof result !== 'object' || result === null) return [];
+ const obj = result as Record<string, unknown>;
+ if (Array.isArray(obj.tools)) return obj.tools;
+ return [];
+}
+
+/** Extract a tool's name regardless of provider format. */
+function extractToolName(tool: unknown, provider: ToolProvider): string | null {
+ if (typeof tool !== 'object' || tool === null) return null;
+ const obj = tool as Record<string, unknown>;
+
+ // Anthropic / Generic: top-level name
+ if (typeof obj.name === 'string') return obj.name;
+
+ // OpenAI / Vercel: nested under function.name
+ if (typeof obj.function === 'object' && obj.function !== null) {
+ const fn = obj.function as Record<string, unknown>;
+ if (typeof fn.name === 'string') return fn.name;
+ }
+
+ return null;
+}
+
+/** Collect existing tool names from a platform-native config. */
+function collectExistingNames(toolConfig: unknown, target?: PlatformTarget): Set<string> {
+ const names = new Set<string>();
+
+ if (target === 'bedrock' && isRecord(toolConfig)) {
+ const tools = (toolConfig as Record<string, unknown>).tools;
+ if (Array.isArray(tools)) {
+ for (const t of tools) {
+ const spec = isRecord(t) ? (t as Record<string, unknown>).toolSpec : null;
+ if (isRecord(spec) && typeof (spec as Record<string, unknown>).name === 'string') {
+ names.add((spec as Record<string, unknown>).name as string);
+ }
+ }
+ }
+ } else if (target === 'vertex' && Array.isArray(toolConfig)) {
+ const decls = (toolConfig[0] as Record<string, unknown>)?.functionDeclarations;
+ if (Array.isArray(decls)) {
+ for (const d of decls) {
+ if (isRecord(d) && typeof (d as Record<string, unknown>).name === 'string') {
+ names.add((d as Record<string, unknown>).name as string);
+ }
+ }
+ }
+ } else if (Array.isArray(toolConfig)) {
+ for (const t of toolConfig) {
+ if (isRecord(t)) {
+ const obj = t as Record<string, unknown>;
+ if (typeof obj.name === 'string') names.add(obj.name);
+ if (isRecord(obj.function)) {
+ const fn = obj.function as Record<string, unknown>;
+ if (typeof fn.name === 'string') names.add(fn.name);
+ }
+ }
+ }
+ }
+
+ return names;
+}
+
+/** Convert a discovered tool to the platform-native shape and push into config. */
+function formatToolForConfig(tool: unknown, _provider: ToolProvider, target?: PlatformTarget): unknown {
+ const obj = tool as Record<string, unknown>;
+
+ if (target === 'bedrock') {
+ // Discovered tools from 'anthropic' provider: { name, description, input_schema }
+ return {
+ toolSpec: {
+ name: obj.name,
+ description: obj.description,
+ inputSchema: { json: obj.input_schema ?? obj.parameters },
+ },
+ };
+ }
+
+ if (target === 'vertex') {
+ // Discovered tools from 'generic' provider: { name, description, parameters }
+ const params = obj.parameters ?? obj.input_schema;
+ return {
+ name: obj.name,
+ description: obj.description,
+ parameters: params ? deepStripKeys(params, UNSUPPORTED_KEYWORDS.vertex) : params,
+ };
+ }
+
+ // For direct API providers (openai, anthropic, vercel, generic) — pass through as-is
+ return tool;
+}
+
+/** Push a formatted tool into the platform-native config structure. */
+function pushToConfig(toolConfig: unknown, formatted: unknown, target?: PlatformTarget): void {
+ if (target === 'bedrock' && isRecord(toolConfig)) {
+ const tools = (toolConfig as Record<string, unknown>).tools;
+ if (Array.isArray(tools)) tools.push(formatted);
+ } else if (target === 'vertex' && Array.isArray(toolConfig)) {
+ const first = toolConfig[0] as Record<string, unknown> | undefined;
+ if (first && Array.isArray(first.functionDeclarations)) {
+ first.functionDeclarations.push(formatted);
+ }
+ } else if (Array.isArray(toolConfig)) {
+ toolConfig.push(formatted);
+ }
+}
+
+function isRecord(v: unknown): v is Record<string, unknown> {
+ return typeof v === 'object' && v !== null && !Array.isArray(v);
+}
diff --git a/packages/sdk/langs/node/src/index.ts b/packages/sdk/langs/node/src/index.ts
index ee8ed0ecd1..eb0c609618 100644
--- a/packages/sdk/langs/node/src/index.ts
+++ b/packages/sdk/langs/node/src/index.ts
@@ -40,5 +40,12 @@ export {
resolveToolOperation,
} from './tools.js';
export { SuperDocCliError } from './runtime/errors.js';
+export { formatToolError, formatToolResult, mergeDiscoveredTools, sanitizeToolSchemas } from './helpers/platform.js';
export type { InvokeOptions, OperationSpec, OperationParamSpec, SuperDocClientOptions } from './runtime/process.js';
export type { ToolChooserInput, ToolChooserMode, ToolGroup, ToolProvider } from './tools.js';
+export type {
+ FormatToolResultOptions,
+ MergeDiscoveredToolsOptions,
+ PlatformTarget,
+ ResultTarget,
+} from './helpers/platform.js';
diff --git a/packages/sdk/langs/node/src/tools.ts b/packages/sdk/langs/node/src/tools.ts
index 8de238486c..762f7d8416 100644
--- a/packages/sdk/langs/node/src/tools.ts
+++ b/packages/sdk/langs/node/src/tools.ts
@@ -428,7 +428,13 @@ export async function dispatchSuperDocTool(
invalidArgument(`Tool arguments for ${toolName} must be an object.`);
}
- validateDispatchArgs(operationId, args);
+ // Strip doc/sessionId — the SDK client manages session targeting after doc.open().
+ // Models fill these in because the tool schemas expose them, but passing them
+ // alongside an active session causes "stateless input.doc cannot be combined
+ // with a session target" errors.
+ const { doc: _doc, sessionId: _sid, ...cleanArgs } = args;
+
+ validateDispatchArgs(operationId, cleanArgs);
const method = resolveDocApiMethod(client, operationId);
- return method(args, invokeOptions);
+ return method(cleanArgs, invokeOptions);
}
diff --git a/packages/sdk/langs/python/superdoc/__init__.py b/packages/sdk/langs/python/superdoc/__init__.py
index 218a1131bb..00498856bf 100644
--- a/packages/sdk/langs/python/superdoc/__init__.py
+++ b/packages/sdk/langs/python/superdoc/__init__.py
@@ -1,6 +1,12 @@
from .client import AsyncSuperDocClient, SuperDocClient
from .errors import SuperDocError
from .skill_api import get_skill, install_skill, list_skills
+from .helpers.platform import (
+ format_tool_error,
+ format_tool_result,
+ merge_discovered_tools,
+ sanitize_tool_schemas,
+)
from .tools_api import (
choose_tools,
dispatch_superdoc_tool,
@@ -25,4 +31,8 @@
"choose_tools",
"dispatch_superdoc_tool",
"dispatch_superdoc_tool_async",
+ "format_tool_result",
+ "format_tool_error",
+ "merge_discovered_tools",
+ "sanitize_tool_schemas",
]
diff --git a/packages/sdk/langs/python/superdoc/helpers/__init__.py b/packages/sdk/langs/python/superdoc/helpers/__init__.py
index 473657d524..0b9967cdfb 100644
--- a/packages/sdk/langs/python/superdoc/helpers/__init__.py
+++ b/packages/sdk/langs/python/superdoc/helpers/__init__.py
@@ -17,6 +17,12 @@
unformat_strikethrough,
unformat_underline,
)
+from .platform import (
+ format_tool_error,
+ format_tool_result,
+ merge_discovered_tools,
+ sanitize_tool_schemas,
+)
__all__ = [
"clear_bold",
@@ -31,4 +37,8 @@
"unformat_italic",
"unformat_underline",
"unformat_strikethrough",
+ "format_tool_result",
+ "format_tool_error",
+ "merge_discovered_tools",
+ "sanitize_tool_schemas",
]
diff --git a/packages/sdk/langs/python/superdoc/helpers/platform.py b/packages/sdk/langs/python/superdoc/helpers/platform.py
new file mode 100644
index 0000000000..2d16f573a3
--- /dev/null
+++ b/packages/sdk/langs/python/superdoc/helpers/platform.py
@@ -0,0 +1,262 @@
+"""
+Platform helper methods for the Python SDK.
+
+These are hand-written convenience wrappers that handle platform-specific
+quirks when integrating SuperDoc tools with cloud AI platforms (Bedrock,
+Vertex AI) and direct APIs (OpenAI, Anthropic). They are NOT generated
+from the contract and will not be overwritten by codegen.
+
+Usage::
+
+ from superdoc import choose_tools, dispatch_superdoc_tool
+ from superdoc.helpers.platform import sanitize_tool_schemas, format_tool_result, merge_discovered_tools
+
+ # Vertex AI: strip unsupported JSON Schema keywords
+ result = choose_tools(provider="generic")
+ sanitized = sanitize_tool_schemas(result["tools"], "vertex")
+
+ # Bedrock: format tool results in platform-native shape
+ result = dispatch_superdoc_tool(client, name, args)
+ formatted = format_tool_result(result, target="bedrock", tool_use_id=tool_use_id)
+
+ # Merge discover_tools output into platform-native config
+ merge_discovered_tools(tool_config, discover_result, provider="anthropic", target="bedrock")
+"""
+
+from __future__ import annotations
+
+import json
+from typing import Any, Dict, List, Literal, Optional, Set, Union
+
+PlatformTarget = Literal["bedrock", "vertex"]
+ResultTarget = Literal["bedrock", "vertex", "anthropic", "openai"]
+ToolProvider = Literal["openai", "anthropic", "vercel", "generic"]
+
+# JSON Schema keywords unsupported by each platform.
+_UNSUPPORTED_KEYWORDS: Dict[PlatformTarget, Set[str]] = {
+ "vertex": {"const"},
+ "bedrock": set(),
+}
+
+
+# ------------------------------------------------------------------
+# sanitize_tool_schemas
+# ------------------------------------------------------------------
+
+
+def sanitize_tool_schemas(tools: List[Any], target: PlatformTarget) -> List[Any]:
+ """Recursively strip JSON Schema keywords that the target platform doesn't support.
+
+ Returns a new list — the original tools are not mutated.
+ """
+ blocked = _UNSUPPORTED_KEYWORDS.get(target)
+ if not blocked:
+ return tools
+ return [_deep_strip_keys(t, blocked) for t in tools]
+
+
+def _deep_strip_keys(obj: Any, blocked: Set[str]) -> Any:
+ if isinstance(obj, list):
+ return [_deep_strip_keys(item, blocked) for item in obj]
+ if isinstance(obj, dict):
+ return {k: _deep_strip_keys(v, blocked) for k, v in obj.items() if k not in blocked}
+ return obj
+
+
+# ------------------------------------------------------------------
+# format_tool_result
+# ------------------------------------------------------------------
+
+
+def format_tool_result(
+ result: Any,
+ *,
+ target: ResultTarget,
+ tool_use_id: Optional[str] = None,
+ name: Optional[str] = None,
+) -> Any:
+ """Wrap a raw ``dispatch_superdoc_tool`` result in the platform-native shape."""
+ if target == "bedrock":
+ # Bedrock requires json content to be a plain dict
+ json_result = result if isinstance(result, dict) else {"result": result}
+ return {"toolResult": {"toolUseId": tool_use_id, "content": [{"json": json_result}]}}
+
+ if target == "vertex":
+ return {"functionResponse": {"name": name, "response": result}}
+
+ if target == "anthropic":
+ return {
+ "type": "tool_result",
+ "tool_use_id": tool_use_id,
+ "content": json.dumps(result),
+ }
+
+ if target == "openai":
+ return {
+ "role": "tool",
+ "tool_call_id": tool_use_id,
+ "content": json.dumps(result),
+ }
+
+ return result
+
+
+def format_tool_error(
+ error: Union[Exception, str],
+ *,
+ target: ResultTarget,
+ tool_use_id: Optional[str] = None,
+ name: Optional[str] = None,
+) -> Any:
+ """Format an error from a failed tool call in the platform-native error shape."""
+ message = str(error)
+
+ if target == "bedrock":
+ return {
+ "toolResult": {
+ "toolUseId": tool_use_id,
+ "content": [{"text": f"Error: {message}"}],
+ "status": "error",
+ }
+ }
+
+ if target == "vertex":
+ return {"functionResponse": {"name": name, "response": {"error": message}}}
+
+ if target == "anthropic":
+ return {
+ "type": "tool_result",
+ "tool_use_id": tool_use_id,
+ "content": f"Error: {message}",
+ "is_error": True,
+ }
+
+ if target == "openai":
+ return {
+ "role": "tool",
+ "tool_call_id": tool_use_id,
+ "content": f"Error: {message}",
+ }
+
+ return {"error": message}
+
+
+# ------------------------------------------------------------------
+# merge_discovered_tools
+# ------------------------------------------------------------------
+
+
+def merge_discovered_tools(
+ tool_config: Any,
+ discover_result: Any,
+ *,
+ provider: ToolProvider,
+ target: Optional[PlatformTarget] = None,
+) -> int:
+ """Extract newly discovered tools, convert to platform format, and merge into config.
+
+ Mutates ``tool_config`` in place. Returns the number of new tools added.
+ """
+ new_tools = _extract_discovered_tools(discover_result)
+ if not new_tools:
+ return 0
+
+ existing_names = _collect_existing_names(tool_config, target)
+ added = 0
+
+ for tool in new_tools:
+ tool_name = _extract_tool_name(tool, provider)
+ if not tool_name or tool_name in existing_names:
+ continue
+ existing_names.add(tool_name)
+
+ formatted = _format_tool_for_config(tool, provider, target)
+ _push_to_config(tool_config, formatted, target)
+ added += 1
+
+ return added
+
+
+def _extract_discovered_tools(result: Any) -> List[Any]:
+ if isinstance(result, dict) and isinstance(result.get("tools"), list):
+ return result["tools"]
+ return []
+
+
+def _extract_tool_name(tool: Any, provider: ToolProvider) -> Optional[str]:
+ if not isinstance(tool, dict):
+ return None
+ # Anthropic / Generic: top-level name
+ if isinstance(tool.get("name"), str):
+ return tool["name"]
+ # OpenAI / Vercel: nested under function.name
+ fn = tool.get("function")
+ if isinstance(fn, dict) and isinstance(fn.get("name"), str):
+ return fn["name"]
+ return None
+
+
+def _collect_existing_names(tool_config: Any, target: Optional[PlatformTarget]) -> Set[str]:
+ names: Set[str] = set()
+
+ if target == "bedrock" and isinstance(tool_config, dict):
+ for t in tool_config.get("tools", []):
+ spec = t.get("toolSpec", {}) if isinstance(t, dict) else {}
+ if isinstance(spec.get("name"), str):
+ names.add(spec["name"])
+ elif target == "vertex" and isinstance(tool_config, list) and tool_config:
+ decls = tool_config[0].get("functionDeclarations", []) if isinstance(tool_config[0], dict) else []
+ for d in decls:
+ if isinstance(d, dict) and isinstance(d.get("name"), str):
+ names.add(d["name"])
+ elif isinstance(tool_config, list):
+ for t in tool_config:
+ if isinstance(t, dict):
+ if isinstance(t.get("name"), str):
+ names.add(t["name"])
+ fn = t.get("function")
+ if isinstance(fn, dict) and isinstance(fn.get("name"), str):
+ names.add(fn["name"])
+
+ return names
+
+
+def _format_tool_for_config(
+ tool: Any, provider: ToolProvider, target: Optional[PlatformTarget]
+) -> Any:
+ if not isinstance(tool, dict):
+ return tool
+
+ if target == "bedrock":
+ return {
+ "toolSpec": {
+ "name": tool.get("name"),
+ "description": tool.get("description"),
+ "inputSchema": {"json": tool.get("input_schema") or tool.get("parameters")},
+ }
+ }
+
+ if target == "vertex":
+ params = tool.get("parameters") or tool.get("input_schema")
+ if params:
+ params = _deep_strip_keys(params, _UNSUPPORTED_KEYWORDS["vertex"])
+ return {
+ "name": tool.get("name"),
+ "description": tool.get("description"),
+ "parameters": params,
+ }
+
+ return tool
+
+
+def _push_to_config(tool_config: Any, formatted: Any, target: Optional[PlatformTarget]) -> None:
+ if target == "bedrock" and isinstance(tool_config, dict):
+ tools = tool_config.get("tools")
+ if isinstance(tools, list):
+ tools.append(formatted)
+ elif target == "vertex" and isinstance(tool_config, list) and tool_config:
+ decls = tool_config[0].get("functionDeclarations") if isinstance(tool_config[0], dict) else None
+ if isinstance(decls, list):
+ decls.append(formatted)
+ elif isinstance(tool_config, list):
+ tool_config.append(formatted)
diff --git a/packages/sdk/langs/python/superdoc/tools_api.py b/packages/sdk/langs/python/superdoc/tools_api.py
index 26cdce9b3d..299addbd5f 100644
--- a/packages/sdk/langs/python/superdoc/tools_api.py
+++ b/packages/sdk/langs/python/superdoc/tools_api.py
@@ -385,6 +385,11 @@ def dispatch_superdoc_tool(
if not isinstance(payload, dict):
raise SuperDocError('Tool arguments must be an object.', code='INVALID_ARGUMENT', details={'toolName': tool_name})
+ # Strip doc/sessionId — the SDK client manages session targeting after doc.open().
+ # Models fill these in because the tool schemas expose them, but passing them
+ # alongside an active session causes errors.
+ payload = {k: v for k, v in payload.items() if k not in ('doc', 'sessionId')}
+
_validate_dispatch_args(operation_id, payload)
method = _resolve_doc_method(client, operation_id)
@@ -413,6 +418,9 @@ async def dispatch_superdoc_tool_async(
if not isinstance(payload, dict):
raise SuperDocError('Tool arguments must be an object.', code='INVALID_ARGUMENT', details={'toolName': tool_name})
+ # Strip doc/sessionId — same as sync version above.
+ payload = {k: v for k, v in payload.items() if k not in ('doc', 'sessionId')}
+
_validate_dispatch_args(operation_id, payload)
method = _resolve_doc_method(client, operation_id)
kwargs = dict(invoke_options or {})
diff --git a/packages/sdk/langs/python/tests/test_platform_helpers.py b/packages/sdk/langs/python/tests/test_platform_helpers.py
new file mode 100644
index 0000000000..df0f1c96b1
--- /dev/null
+++ b/packages/sdk/langs/python/tests/test_platform_helpers.py
@@ -0,0 +1,244 @@
+"""Tests for platform helpers (sanitize, format, merge)."""
+
+import json
+import os
+import sys
+
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
+
+from superdoc.helpers.platform import (
+ format_tool_error,
+ format_tool_result,
+ merge_discovered_tools,
+ sanitize_tool_schemas,
+)
+
+
+# ------------------------------------------------------------------
+# sanitize_tool_schemas
+# ------------------------------------------------------------------
+
+
+class TestSanitizeToolSchemas:
+ def test_strips_const_for_vertex(self):
+ tools = [
+ {
+ "name": "query_match",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "matchKind": {"const": "text"},
+ "query": {"type": "string"},
+ },
+ },
+ }
+ ]
+
+ result = sanitize_tool_schemas(tools, "vertex")
+
+ assert result[0]["parameters"]["properties"]["matchKind"] == {}
+ assert result[0]["parameters"]["properties"]["query"] == {"type": "string"}
+
+ def test_strips_const_recursively(self):
+ tools = [
+ {
+ "name": "test",
+ "parameters": {
+ "oneOf": [
+ {"properties": {"kind": {"const": "a"}, "value": {"type": "string"}}},
+ {"properties": {"kind": {"const": "b"}, "value": {"type": "number"}}},
+ ]
+ },
+ }
+ ]
+
+ result = sanitize_tool_schemas(tools, "vertex")
+
+ assert result[0]["parameters"]["oneOf"][0]["properties"]["kind"] == {}
+ assert result[0]["parameters"]["oneOf"][1]["properties"]["kind"] == {}
+
+ def test_does_not_mutate_original(self):
+ tools = [{"name": "test", "parameters": {"properties": {"x": {"const": "a"}}}}]
+ original = json.dumps(tools)
+
+ sanitize_tool_schemas(tools, "vertex")
+
+ assert json.dumps(tools) == original
+
+ def test_noop_for_bedrock(self):
+ tools = [{"name": "test", "parameters": {"properties": {"x": {"const": "a"}}}}]
+ result = sanitize_tool_schemas(tools, "bedrock")
+ assert result is tools
+
+ def test_empty_array(self):
+ assert sanitize_tool_schemas([], "vertex") == []
+
+
+# ------------------------------------------------------------------
+# format_tool_result
+# ------------------------------------------------------------------
+
+
+class TestFormatToolResult:
+ def test_bedrock_object_result(self):
+ result = format_tool_result({"text": "hello"}, target="bedrock", tool_use_id="tu-1")
+ assert result == {
+ "toolResult": {"toolUseId": "tu-1", "content": [{"json": {"text": "hello"}}]}
+ }
+
+ def test_bedrock_array_result_wrapped(self):
+ result = format_tool_result([1, 2, 3], target="bedrock", tool_use_id="tu-1")
+ assert result == {
+ "toolResult": {"toolUseId": "tu-1", "content": [{"json": {"result": [1, 2, 3]}}]}
+ }
+
+ def test_bedrock_string_result_wrapped(self):
+ result = format_tool_result("hello", target="bedrock", tool_use_id="tu-1")
+ assert result == {
+ "toolResult": {"toolUseId": "tu-1", "content": [{"json": {"result": "hello"}}]}
+ }
+
+ def test_bedrock_none_result_wrapped(self):
+ result = format_tool_result(None, target="bedrock", tool_use_id="tu-1")
+ assert result == {
+ "toolResult": {"toolUseId": "tu-1", "content": [{"json": {"result": None}}]}
+ }
+
+ def test_vertex_result(self):
+ result = format_tool_result({"data": 1}, target="vertex", name="get_text")
+ assert result == {"functionResponse": {"name": "get_text", "response": {"data": 1}}}
+
+ def test_anthropic_result(self):
+ result = format_tool_result({"ok": True}, target="anthropic", tool_use_id="tu-1")
+ assert result == {
+ "type": "tool_result",
+ "tool_use_id": "tu-1",
+ "content": '{"ok": true}',
+ }
+
+ def test_openai_result(self):
+ result = format_tool_result({"ok": True}, target="openai", tool_use_id="call-1", name="fn")
+ assert result == {
+ "role": "tool",
+ "tool_call_id": "call-1",
+ "content": '{"ok": true}',
+ }
+
+
+# ------------------------------------------------------------------
+# format_tool_error
+# ------------------------------------------------------------------
+
+
+class TestFormatToolError:
+ def test_bedrock_error(self):
+ result = format_tool_error(Exception("boom"), target="bedrock", tool_use_id="tu-1")
+ assert result == {
+ "toolResult": {
+ "toolUseId": "tu-1",
+ "content": [{"text": "Error: boom"}],
+ "status": "error",
+ }
+ }
+
+ def test_vertex_error(self):
+ result = format_tool_error("fail", target="vertex", name="fn")
+ assert result == {"functionResponse": {"name": "fn", "response": {"error": "fail"}}}
+
+ def test_anthropic_error(self):
+ result = format_tool_error(Exception("nope"), target="anthropic", tool_use_id="tu-1")
+ assert result == {
+ "type": "tool_result",
+ "tool_use_id": "tu-1",
+ "content": "Error: nope",
+ "is_error": True,
+ }
+
+ def test_openai_error(self):
+ result = format_tool_error(Exception("bad"), target="openai", tool_use_id="call-1")
+ assert result == {"role": "tool", "tool_call_id": "call-1", "content": "Error: bad"}
+
+
+# ------------------------------------------------------------------
+# merge_discovered_tools
+# ------------------------------------------------------------------
+
+
+class TestMergeDiscoveredTools:
+ anthropic_tools = [
+ {"name": "add_comment", "description": "Add a comment", "input_schema": {"type": "object"}},
+ {"name": "format_bold", "description": "Bold text", "input_schema": {"type": "object"}},
+ ]
+
+ generic_tools = [
+ {
+ "name": "add_comment",
+ "description": "Add a comment",
+ "parameters": {"type": "object", "properties": {"kind": {"const": "inline"}}},
+ },
+ {"name": "format_bold", "description": "Bold text", "parameters": {"type": "object"}},
+ ]
+
+ def test_bedrock_merge(self):
+ tool_config = {
+ "tools": [{"toolSpec": {"name": "existing", "description": "x", "inputSchema": {"json": {}}}}]
+ }
+ result = {"tools": self.anthropic_tools}
+
+ count = merge_discovered_tools(tool_config, result, provider="anthropic", target="bedrock")
+
+ assert count == 2
+ assert len(tool_config["tools"]) == 3
+ assert tool_config["tools"][1] == {
+ "toolSpec": {
+ "name": "add_comment",
+ "description": "Add a comment",
+ "inputSchema": {"json": {"type": "object"}},
+ }
+ }
+
+ def test_bedrock_skips_duplicates(self):
+ tool_config = {
+ "tools": [{"toolSpec": {"name": "add_comment", "description": "x", "inputSchema": {"json": {}}}}]
+ }
+ result = {"tools": self.anthropic_tools}
+
+ count = merge_discovered_tools(tool_config, result, provider="anthropic", target="bedrock")
+
+ assert count == 1
+ assert len(tool_config["tools"]) == 2
+
+ def test_vertex_merge_sanitizes_schemas(self):
+ tool_config = [{"functionDeclarations": [{"name": "existing", "description": "x", "parameters": {}}]}]
+ result = {"tools": self.generic_tools}
+
+ count = merge_discovered_tools(tool_config, result, provider="generic", target="vertex")
+
+ assert count == 2
+ assert len(tool_config[0]["functionDeclarations"]) == 3
+ add_comment = tool_config[0]["functionDeclarations"][1]
+ assert '"const"' not in json.dumps(add_comment)
+
+ def test_plain_array_merge(self):
+ tool_config = [{"name": "existing", "description": "x", "input_schema": {}}]
+ result = {"tools": self.anthropic_tools}
+
+ count = merge_discovered_tools(tool_config, result, provider="anthropic")
+
+ assert count == 2
+ assert len(tool_config) == 3
+
+ def test_empty_discover_result(self):
+ tool_config = {"tools": []}
+ count = merge_discovered_tools(tool_config, {}, provider="anthropic", target="bedrock")
+ assert count == 0
+
+ def test_non_object_discover_result(self):
+ tool_config = {"tools": []}
+ count = merge_discovered_tools(tool_config, "not an object", provider="anthropic", target="bedrock")
+ assert count == 0
+
+ def test_none_discover_result(self):
+ tool_config = {"tools": []}
+ count = merge_discovered_tools(tool_config, None, provider="anthropic", target="bedrock")
+ assert count == 0