From 5f689d04e81ae963a22b75d1c2b6e1c805fb55a8 Mon Sep 17 00:00:00 2001 From: Connor Kirkpatrick Date: Thu, 4 Dec 2025 15:55:59 +0000 Subject: [PATCH 1/5] Add AI Workflow Lambda durable function snippets --- lambda-durable-ai-agent-with-tools/example.py | 92 ++++++++++++++++++ lambda-durable-ai-agent-with-tools/example.ts | 95 +++++++++++++++++++ .../snippet-data.json | 66 +++++++++++++ lambda-durable-ai-human-review/example.py | 49 ++++++++++ lambda-durable-ai-human-review/example.ts | 48 ++++++++++ .../snippet-data.json | 66 +++++++++++++ lambda-durable-ai-llm-as-judge/example.py | 54 +++++++++++ lambda-durable-ai-llm-as-judge/example.ts | 48 ++++++++++ .../snippet-data.json | 66 +++++++++++++ .../example.py | 27 ++++++ .../example.ts | 31 ++++++ .../snippet-data.json | 66 +++++++++++++ lambda-durable-ai-prompt-chaining/example.py | 22 +++++ lambda-durable-ai-prompt-chaining/example.ts | 23 +++++ .../snippet-data.json | 66 +++++++++++++ .../example.py | 33 +++++++ .../example.ts | 31 ++++++ .../snippet-data.json | 73 ++++++++++++++ 18 files changed, 956 insertions(+) create mode 100644 lambda-durable-ai-agent-with-tools/example.py create mode 100644 lambda-durable-ai-agent-with-tools/example.ts create mode 100644 lambda-durable-ai-agent-with-tools/snippet-data.json create mode 100644 lambda-durable-ai-human-review/example.py create mode 100644 lambda-durable-ai-human-review/example.ts create mode 100644 lambda-durable-ai-human-review/snippet-data.json create mode 100644 lambda-durable-ai-llm-as-judge/example.py create mode 100644 lambda-durable-ai-llm-as-judge/example.ts create mode 100644 lambda-durable-ai-llm-as-judge/snippet-data.json create mode 100644 lambda-durable-ai-parallel-invocation/example.py create mode 100644 lambda-durable-ai-parallel-invocation/example.ts create mode 100644 lambda-durable-ai-parallel-invocation/snippet-data.json create mode 100644 lambda-durable-ai-prompt-chaining/example.py create mode 100644 
lambda-durable-ai-prompt-chaining/example.ts create mode 100644 lambda-durable-ai-prompt-chaining/snippet-data.json create mode 100644 lambda-durable-ai-structured-output/example.py create mode 100644 lambda-durable-ai-structured-output/example.ts create mode 100644 lambda-durable-ai-structured-output/snippet-data.json diff --git a/lambda-durable-ai-agent-with-tools/example.py b/lambda-durable-ai-agent-with-tools/example.py new file mode 100644 index 0000000..eaa130c --- /dev/null +++ b/lambda-durable-ai-agent-with-tools/example.py @@ -0,0 +1,92 @@ +from typing import Any, Callable + +import boto3 +from aws_durable_execution_sdk_python import DurableContext, durable_execution + +MODEL_ID = "us.amazon.nova-pro-v1:0" +bedrock = boto3.client("bedrock-runtime") + + +class AgentTool: + def __init__(self, tool_spec: dict, execute: Callable[[dict, DurableContext], str]): + self.tool_spec = tool_spec + self.execute = execute + + +TOOLS: list[AgentTool] = [ + AgentTool( + tool_spec={ + "name": "get_weather", + "description": "Get the current weather for a location.", + "inputSchema": { + "json": { + "type": "object", + "properties": {"location": {"type": "string"}}, + "required": ["location"], + } + }, + }, + execute=lambda input, ctx: f"The weather in {input.get('location', 'unknown')} is sunny, 72°F.", + ), + AgentTool( + tool_spec={ + "name": "wait_for_human_review", + "description": "Request human review and wait for response.", + "inputSchema": { + "json": { + "type": "object", + "properties": {"question": {"type": "string"}}, + "required": ["question"], + } + }, + }, + execute=lambda input, ctx: ctx.wait_for_callback( + lambda callback_id, _: print(f"Review needed: {input.get('question')}"), + "human_review", + ), + ), +] + + +@durable_execution +def handler(event: dict, context: DurableContext): + prompt = event.get("prompt", "What's the weather in Seattle?") + messages: list[Any] = [{"role": "user", "content": [{"text": prompt}]}] + tools_by_name = 
{t.tool_spec["name"]: t for t in TOOLS} + + while True: + response = context.step( + lambda _: bedrock.converse( + modelId=MODEL_ID, + messages=messages, + toolConfig={"tools": [{"toolSpec": t.tool_spec} for t in TOOLS]}, + ), + "converse", + ) + + output = response.get("output", {}).get("message", {}) + messages.append(output) + + if response.get("stopReason") == "end_turn": + for block in output.get("content", []): + if "text" in block: + return block["text"] + return "" + + tool_results = [] + for block in output.get("content", []): + if "toolUse" in block: + tool_use = block["toolUse"] + tool = tools_by_name[tool_use["name"]] + result = context.run_in_child_context( + lambda child_ctx: tool.execute(tool_use.get("input", {}), child_ctx), + f"tool:{tool_use['name']}", + ) + tool_results.append({ + "toolResult": { + "toolUseId": tool_use["toolUseId"], + "content": [{"text": result}], + } + }) + + messages.append({"role": "user", "content": tool_results}) diff --git a/lambda-durable-ai-agent-with-tools/example.ts b/lambda-durable-ai-agent-with-tools/example.ts new file mode 100644 index 0000000..52f5ca3 --- /dev/null +++ b/lambda-durable-ai-agent-with-tools/example.ts @@ -0,0 +1,95 @@ +import { + type DurableContext, + withDurableExecution, +} from "@aws/durable-execution-sdk-js"; +import { + BedrockRuntimeClient, + type ContentBlock, + ConverseCommand, + type Message, + type Tool, +} from "@aws-sdk/client-bedrock-runtime"; + +const MODEL_ID = "us.amazon.nova-pro-v1:0"; +const bedrock = new BedrockRuntimeClient({}); + +type AgentTool = { + toolSpec: NonNullable; + execute: (input: Record, context: DurableContext) => Promise; +}; + +const tools: AgentTool[] = [ + { + toolSpec: { + name: "get_weather", + description: "Get the current weather for a location.", + inputSchema: { + json: { + type: "object", + properties: { location: { type: "string" } }, + required: ["location"], + }, + }, + }, + execute: async (input) => `The weather in ${input.location} is sunny, 
72°F.`, + }, + { + toolSpec: { + name: "wait_for_human_review", + description: "Request human review and wait for response.", + inputSchema: { + json: { + type: "object", + properties: { question: { type: "string" } }, + required: ["question"], + }, + }, + }, + execute: async (input, context) => + context.waitForCallback("human_review", async (callbackId) => { + console.log(`Review needed: ${input.question}`); + }), + }, +]; + +export const handler = withDurableExecution( + async (event: { prompt?: string }, context: DurableContext) => { + const prompt = event.prompt ?? "What's the weather in Seattle?"; + const messages: Message[] = [{ role: "user", content: [{ text: prompt }] }]; + const toolsByName = Object.fromEntries(tools.map((t) => [t.toolSpec.name, t])); + + while (true) { + const response = await context.step("converse", async () => { + return bedrock.send( + new ConverseCommand({ + modelId: MODEL_ID, + messages, + toolConfig: { tools: tools.map((t) => ({ toolSpec: t.toolSpec })) }, + }) + ); + }); + + const output = response.output!.message!; + messages.push(output); + + if (response.stopReason === "end_turn") { + const textBlock = output.content?.find((b): b is ContentBlock.TextMember => "text" in b); + return textBlock?.text ?? ""; + } + + const toolResults: ContentBlock[] = []; + for (const block of output.content ?? 
[]) { + if ("toolUse" in block && block.toolUse) { + const { toolUseId, name, input } = block.toolUse; + const tool = toolsByName[name!]; + const result = await context.runInChildContext(`tool:${name}`, async (childContext) => { + return tool.execute(input as Record, childContext); + }); + toolResults.push({ toolResult: { toolUseId, content: [{ text: result }] } }); + } + } + + messages.push({ role: "user", content: toolResults }); + } + } +); diff --git a/lambda-durable-ai-agent-with-tools/snippet-data.json b/lambda-durable-ai-agent-with-tools/snippet-data.json new file mode 100644 index 0000000..8a4de06 --- /dev/null +++ b/lambda-durable-ai-agent-with-tools/snippet-data.json @@ -0,0 +1,66 @@ +{ + "title": "Agent with Tools using Lambda Durable Functions", + "description": "Agentic loop where the LLM can call tools, including tools that suspend for human input, using AWS Lambda durable functions", + "type": "Integration", + "services": ["lambda", "bedrock"], + "languages": ["Python", "TypeScript"], + "tags": ["ai", "durable-functions", "bedrock", "genai", "agent", "tools", "function-calling"], + "introBox": { + "headline": "How it works", + "text": [ + "This pattern demonstrates an agentic loop with AWS Lambda durable functions. The LLM can call tools, and each model call and tool execution is checkpointed for deterministic replay.", + "Tools can use durable context features like waitForCallback to suspend the agent while awaiting external input.", + "Benefits: Complex agent loops are expressed as simple sequential code. Each tool call is checkpointed for resilient execution. Tools can suspend for human input without consuming compute resources." 
+ ] + }, + "gitHub": { + "template": { + "repoURL": "https://github.com/aws-samples/sample-ai-workflows-in-aws-lambda-durable-functions" + } + }, + "snippets": [ + { + "title": "Runtimes", + "codeTabs": [ + { + "id": "Python", + "title": "Usage Example with Python:", + "description": "Agent with tools using Lambda durable functions in Python.", + "snippets": [ + { + "snippetPath": "example.py", + "language": "py" + } + ] + }, + { + "id": "TypeScript", + "title": "Usage Example with TypeScript:", + "description": "Agent with tools using Lambda durable functions in TypeScript.", + "snippets": [ + { + "snippetPath": "example.ts", + "language": "ts" + } + ] + } + ] + } + ], + "resources": { + "bullets": [ + { + "text": "AWS Lambda durable functions", + "link": "https://aws.amazon.com/lambda/lambda-durable-functions/" + } + ] + }, + "authors": [ + { + "headline": "Presented by Connor Kirkpatrick", + "name": "Connor Kirkpatrick", + "bio": "Solution Engineer at AWS", + "linkedin": "connorkirkpatrick" + } + ] +} diff --git a/lambda-durable-ai-human-review/example.py b/lambda-durable-ai-human-review/example.py new file mode 100644 index 0000000..4a8ac0f --- /dev/null +++ b/lambda-durable-ai-human-review/example.py @@ -0,0 +1,49 @@ +import json + +from aws_durable_execution_sdk_python import DurableContext, durable_execution +from aws_durable_execution_sdk_python.config import Duration, WaitForCallbackConfig +from pydantic import BaseModel + +from utils.converse import converse + +MODEL_ID = "us.amazon.nova-pro-v1:0" + + +class ReviewResult(BaseModel): + approved: bool + notes: str | None = None + + +def send_for_review(callback_id: str, document: str, extracted_fields: str): + print(f"Review needed for document. 
Callback ID: {callback_id}") + print(f"Extracted fields: {extracted_fields}") + + +@durable_execution +def handler(event: dict, context: DurableContext): + document = event.get("document", "Sample invoice with amount $1,234.56") + + extracted_fields = context.step( + lambda _: converse( + MODEL_ID, + f'Extract key fields from this document as JSON: "{document}"', + ), + "extract fields", + ) + + review_result_str = context.wait_for_callback( + lambda callback_id, _: send_for_review(callback_id, document, extracted_fields), + "Await Human review", + WaitForCallbackConfig(timeout=Duration.from_days(7)), + ) + + review_result = ReviewResult(**json.loads(review_result_str)) + + if not review_result.approved: + return { + "status": "rejected", + "notes": review_result.notes, + "extractedFields": extracted_fields, + } + + return {"status": "approved", "extractedFields": extracted_fields} diff --git a/lambda-durable-ai-human-review/example.ts b/lambda-durable-ai-human-review/example.ts new file mode 100644 index 0000000..1787e73 --- /dev/null +++ b/lambda-durable-ai-human-review/example.ts @@ -0,0 +1,48 @@ +import { + type DurableContext, + withDurableExecution, +} from "@aws/durable-execution-sdk-js"; +import { z } from "zod"; +import { converse } from "./utils/converse.js"; + +const MODEL_ID = "us.amazon.nova-pro-v1:0"; + +const ReviewResultSchema = z.object({ + approved: z.boolean(), + notes: z.string().optional(), +}); + +const sendForReview = ( + callbackId: string, + document: string, + extractedFields: string +) => { + console.log(`Review needed for document. Callback ID: ${callbackId}`); + console.log(`Extracted fields: ${extractedFields}`); +}; + +export const handler = withDurableExecution( + async (event: { document?: string }, context: DurableContext) => { + const document = event.document ?? 
"Sample invoice with amount $1,234.56"; + + const extractedFields = await context.step("extract fields", async () => + converse(MODEL_ID, `Extract key fields from this document as JSON: "${document}"`) + ); + + const reviewResultStr = await context.waitForCallback( + "Await Human review", + async (callbackId) => { + sendForReview(callbackId, document, extractedFields); + }, + { timeout: { days: 7 } } + ); + + const reviewResult = ReviewResultSchema.parse(JSON.parse(reviewResultStr)); + + if (!reviewResult.approved) { + return { status: "rejected", notes: reviewResult.notes, extractedFields }; + } + + return { status: "approved", extractedFields }; + } +); diff --git a/lambda-durable-ai-human-review/snippet-data.json b/lambda-durable-ai-human-review/snippet-data.json new file mode 100644 index 0000000..f28785c --- /dev/null +++ b/lambda-durable-ai-human-review/snippet-data.json @@ -0,0 +1,66 @@ +{ + "title": "Human Review with Lambda Durable Functions", + "description": "Pause AI workflow for human approval before continuing using AWS Lambda durable functions", + "type": "Integration", + "services": ["lambda", "bedrock"], + "languages": ["Python", "TypeScript"], + "tags": ["ai", "durable-functions", "bedrock", "genai", "human-in-the-loop", "approval"], + "introBox": { + "headline": "How it works", + "text": [ + "This pattern demonstrates human-in-the-loop workflows with AWS Lambda durable functions. The workflow suspends execution while awaiting human review without consuming compute resources.", + "The example extracts fields from a document using an LLM, then waits for human approval before proceeding.", + "Benefits: Pay only for active compute time, not wait periods. Workflows can suspend for up to one year. No infrastructure to manage while waiting for human input." 
+ ] + }, + "gitHub": { + "template": { + "repoURL": "https://github.com/aws-samples/sample-ai-workflows-in-aws-lambda-durable-functions" + } + }, + "snippets": [ + { + "title": "Runtimes", + "codeTabs": [ + { + "id": "Python", + "title": "Usage Example with Python:", + "description": "Human review workflow with Lambda durable functions using Python.", + "snippets": [ + { + "snippetPath": "example.py", + "language": "py" + } + ] + }, + { + "id": "TypeScript", + "title": "Usage Example with TypeScript:", + "description": "Human review workflow with Lambda durable functions using TypeScript.", + "snippets": [ + { + "snippetPath": "example.ts", + "language": "ts" + } + ] + } + ] + } + ], + "resources": { + "bullets": [ + { + "text": "AWS Lambda durable functions", + "link": "https://aws.amazon.com/lambda/lambda-durable-functions/" + } + ] + }, + "authors": [ + { + "headline": "Presented by Connor Kirkpatrick", + "name": "Connor Kirkpatrick", + "bio": "Solution Engineer at AWS", + "linkedin": "connorkirkpatrick" + } + ] +} diff --git a/lambda-durable-ai-llm-as-judge/example.py b/lambda-durable-ai-llm-as-judge/example.py new file mode 100644 index 0000000..041672f --- /dev/null +++ b/lambda-durable-ai-llm-as-judge/example.py @@ -0,0 +1,54 @@ +import json +import re + +from aws_durable_execution_sdk_python import DurableContext, durable_execution + +from utils.converse import converse + +MODELS = ["us.amazon.nova-lite-v1:0", "us.amazon.nova-pro-v1:0"] + + +@durable_execution +def handler(event: dict, context: DurableContext): + question = event.get( + "question", "Write a 100 word summary on the great fire of London" + ) + + result = context.map( + MODELS, + lambda ctx, model_id, idx, items: { + "modelId": model_id, + "answer": converse(model_id, question), + }, + "Get candidate answers", + ) + + candidates = result.get_results() + + def judge(_): + responses = "\n".join( + f"{i + 1}. 
(Model: {c['modelId']}) {c['answer']}" + for i, c in enumerate(candidates) + ) + prompt = f'''Question: "{question}" + +Responses: +{responses} + +Which response is best? Reply with JSON: {{"bestIndex": <1-based index>, "reasoning": ""}}''' + + response = converse(MODELS[0], prompt) + match = re.search(r"\{[\s\S]*\}", response) + parsed = json.loads(match.group(0)) if match else {} + best_index = (parsed.get("bestIndex", 1)) - 1 + best = candidates[best_index] if 0 <= best_index < len(candidates) else candidates[0] + + return { + "bestAnswer": best["answer"], + "reasoning": parsed.get("reasoning", ""), + "sourceModel": best["modelId"], + } + + judgment = context.step(judge, "judge") + + return {"question": question, **judgment} diff --git a/lambda-durable-ai-llm-as-judge/example.ts b/lambda-durable-ai-llm-as-judge/example.ts new file mode 100644 index 0000000..5f3b32e --- /dev/null +++ b/lambda-durable-ai-llm-as-judge/example.ts @@ -0,0 +1,48 @@ +import { + type DurableContext, + withDurableExecution, +} from "@aws/durable-execution-sdk-js"; +import { converse } from "./utils/converse.js"; + +const MODELS = ["us.amazon.nova-lite-v1:0", "us.amazon.nova-pro-v1:0"]; + +export const handler = withDurableExecution( + async (event: { question?: string }, context: DurableContext) => { + const question = + event.question ?? "Write a 100 word summary on the great fire of London"; + + const result = await context.map( + "Get candidate answers", + MODELS, + async (_, modelId) => ({ + modelId, + answer: await converse(modelId, question), + }), + { itemNamer: (item) => `candidate-${item}` } + ); + + const candidates = result.getResults(); + + const judgment = await context.step("judge", async () => { + const prompt = `Question: "${question}" + +Responses: +${candidates.map((r, i) => `${i + 1}. (Model: ${r.modelId}) ${r.answer}`).join("\n")} + +Which response is best? 
Reply with JSON: {"bestIndex": <1-based index>, "reasoning": ""}`; + + const response = await converse(MODELS[0], prompt); + const parsed = JSON.parse(response.match(/\{[\s\S]*\}/)?.[0] ?? "{}"); + const bestIndex = (parsed.bestIndex ?? 1) - 1; + const best = candidates[bestIndex] ?? candidates[0]; + + return { + bestAnswer: best.answer, + reasoning: parsed.reasoning ?? "", + sourceModel: best.modelId, + }; + }); + + return { question, ...judgment }; + } +); diff --git a/lambda-durable-ai-llm-as-judge/snippet-data.json b/lambda-durable-ai-llm-as-judge/snippet-data.json new file mode 100644 index 0000000..56e1549 --- /dev/null +++ b/lambda-durable-ai-llm-as-judge/snippet-data.json @@ -0,0 +1,66 @@ +{ + "title": "LLM as Judge with Lambda Durable Functions", + "description": "Get multiple AI responses in parallel, then use another LLM to pick the best one using AWS Lambda durable functions", + "type": "Integration", + "services": ["lambda", "bedrock"], + "languages": ["Python", "TypeScript"], + "tags": ["ai", "durable-functions", "bedrock", "genai", "llm-as-judge", "evaluation"], + "introBox": { + "headline": "How it works", + "text": [ + "This pattern demonstrates the LLM-as-judge pattern with AWS Lambda durable functions. Multiple models answer a question in parallel, then another LLM evaluates and selects the best response.", + "All candidate answers and the final judgment are checkpointed for deterministic replay.", + "Benefits: Parallel execution reduces total workflow time. Checkpointing prevents re-running expensive LLM calls on retry. Deterministic replay ensures consistent results." 
+ ] + }, + "gitHub": { + "template": { + "repoURL": "https://github.com/aws-samples/sample-ai-workflows-in-aws-lambda-durable-functions" + } + }, + "snippets": [ + { + "title": "Runtimes", + "codeTabs": [ + { + "id": "Python", + "title": "Usage Example with Python:", + "description": "LLM as judge with Lambda durable functions using Python.", + "snippets": [ + { + "snippetPath": "example.py", + "language": "py" + } + ] + }, + { + "id": "TypeScript", + "title": "Usage Example with TypeScript:", + "description": "LLM as judge with Lambda durable functions using TypeScript.", + "snippets": [ + { + "snippetPath": "example.ts", + "language": "ts" + } + ] + } + ] + } + ], + "resources": { + "bullets": [ + { + "text": "AWS Lambda durable functions", + "link": "https://aws.amazon.com/lambda/lambda-durable-functions/" + } + ] + }, + "authors": [ + { + "headline": "Presented by Connor Kirkpatrick", + "name": "Connor Kirkpatrick", + "bio": "Solution Engineer at AWS", + "linkedin": "connorkirkpatrick" + } + ] +} diff --git a/lambda-durable-ai-parallel-invocation/example.py b/lambda-durable-ai-parallel-invocation/example.py new file mode 100644 index 0000000..203f8f4 --- /dev/null +++ b/lambda-durable-ai-parallel-invocation/example.py @@ -0,0 +1,27 @@ +from aws_durable_execution_sdk_python import DurableContext, durable_execution + +from utils.converse import converse + +MODEL_ID = "us.amazon.nova-lite-v1:0" + +PROMPTS = [ + "Explain the benefits of", + "Describe the challenges of", + "Summarize the future of", +] + + +@durable_execution +def handler(event: dict, context: DurableContext): + topic = event.get("topic", "artificial intelligence") + + result = context.map( + PROMPTS, + lambda ctx, prompt, idx, items: { + "prompt": prompt, + "response": converse(MODEL_ID, f"{prompt} {topic}"), + }, + "Get perspectives", + ) + + return {"topic": topic, "perspectives": result.get_results()} diff --git a/lambda-durable-ai-parallel-invocation/example.ts 
b/lambda-durable-ai-parallel-invocation/example.ts new file mode 100644 index 0000000..69f1927 --- /dev/null +++ b/lambda-durable-ai-parallel-invocation/example.ts @@ -0,0 +1,31 @@ +import { + type DurableContext, + withDurableExecution, +} from "@aws/durable-execution-sdk-js"; +import { converse } from "./utils/converse.js"; + +const MODEL_ID = "us.amazon.nova-lite-v1:0"; + +const PROMPTS = [ + "Explain the benefits of", + "Describe the challenges of", + "Summarize the future of", +]; + +export const handler = withDurableExecution( + async (event: { topic?: string }, context: DurableContext) => { + const topic = event.topic ?? "artificial intelligence"; + + const result = await context.map( + "Get perspectives", + PROMPTS, + async (_, prompt) => ({ + prompt, + response: await converse(MODEL_ID, `${prompt} ${topic}`), + }), + { itemNamer: (_, i) => `prompt-${i}` } + ); + + return { topic, perspectives: result.getResults() }; + } +); diff --git a/lambda-durable-ai-parallel-invocation/snippet-data.json b/lambda-durable-ai-parallel-invocation/snippet-data.json new file mode 100644 index 0000000..6f5f596 --- /dev/null +++ b/lambda-durable-ai-parallel-invocation/snippet-data.json @@ -0,0 +1,66 @@ +{ + "title": "Parallel Invocation with Lambda Durable Functions", + "description": "Run multiple LLM prompts simultaneously with individual checkpointing using AWS Lambda durable functions", + "type": "Integration", + "services": ["lambda", "bedrock"], + "languages": ["Python", "TypeScript"], + "tags": ["ai", "durable-functions", "bedrock", "genai", "parallel"], + "introBox": { + "headline": "How it works", + "text": [ + "This pattern demonstrates parallel invocation with AWS Lambda durable functions. Multiple LLM calls execute concurrently, with each response checkpointed independently.", + "The example sends multiple prompts about a topic in parallel, gathering different perspectives simultaneously.", + "Benefits: Faster execution through concurrent LLM calls. 
Individual checkpointing means partial failures don't require re-running successful calls. Automatic retry handling for each parallel operation." + ] + }, + "gitHub": { + "template": { + "repoURL": "https://github.com/aws-samples/sample-ai-workflows-in-aws-lambda-durable-functions" + } + }, + "snippets": [ + { + "title": "Runtimes", + "codeTabs": [ + { + "id": "Python", + "title": "Usage Example with Python:", + "description": "Parallel invocation with Lambda durable functions using Python.", + "snippets": [ + { + "snippetPath": "example.py", + "language": "py" + } + ] + }, + { + "id": "TypeScript", + "title": "Usage Example with TypeScript:", + "description": "Parallel invocation with Lambda durable functions using TypeScript.", + "snippets": [ + { + "snippetPath": "example.ts", + "language": "ts" + } + ] + } + ] + } + ], + "resources": { + "bullets": [ + { + "text": "AWS Lambda durable functions", + "link": "https://aws.amazon.com/lambda/lambda-durable-functions/" + } + ] + }, + "authors": [ + { + "headline": "Presented by Connor Kirkpatrick", + "name": "Connor Kirkpatrick", + "bio": "Solution Engineer at AWS", + "linkedin": "connorkirkpatrick" + } + ] +} diff --git a/lambda-durable-ai-prompt-chaining/example.py b/lambda-durable-ai-prompt-chaining/example.py new file mode 100644 index 0000000..b745517 --- /dev/null +++ b/lambda-durable-ai-prompt-chaining/example.py @@ -0,0 +1,22 @@ +from aws_durable_execution_sdk_python import DurableContext, durable_execution + +from utils.converse import converse + +MODEL_ID = "us.amazon.nova-lite-v1:0" + + +@durable_execution +def handler(event: dict, context: DurableContext): + topic = event.get("topic", "programming") + + joke = context.step( + lambda _: converse(MODEL_ID, f"Make a joke about {topic}"), + "generate joke", + ) + + review = context.step( + lambda _: converse(MODEL_ID, f'Rate this joke 1-10 and explain why: "{joke}"'), + "review joke", + ) + + return {"joke": joke, "review": review} diff --git 
a/lambda-durable-ai-prompt-chaining/example.ts b/lambda-durable-ai-prompt-chaining/example.ts new file mode 100644 index 0000000..57e0e8d --- /dev/null +++ b/lambda-durable-ai-prompt-chaining/example.ts @@ -0,0 +1,23 @@ +import { + type DurableContext, + withDurableExecution, +} from "@aws/durable-execution-sdk-js"; +import { converse } from "./utils/converse.js"; + +const MODEL_ID = "us.amazon.nova-lite-v1:0"; + +export const handler = withDurableExecution( + async (event: { topic?: string }, context: DurableContext) => { + const topic = event.topic ?? "programming"; + + const joke = await context.step("generate joke", async () => { + return await converse(MODEL_ID, `Make a joke about ${topic}`); + }); + + const review = await context.step("review joke", async () => { + return await converse(MODEL_ID, `Rate this joke 1-10 and explain why: "${joke}"`); + }); + + return { joke, review }; + } +); diff --git a/lambda-durable-ai-prompt-chaining/snippet-data.json b/lambda-durable-ai-prompt-chaining/snippet-data.json new file mode 100644 index 0000000..1155c9e --- /dev/null +++ b/lambda-durable-ai-prompt-chaining/snippet-data.json @@ -0,0 +1,66 @@ +{ + "title": "Prompt Chaining with Lambda Durable Functions", + "description": "Sequential LLM calls where each step builds on the previous one using AWS Lambda durable functions", + "type": "Integration", + "services": ["lambda", "bedrock"], + "languages": ["Python", "TypeScript"], + "tags": ["ai", "durable-functions", "bedrock", "genai", "prompt-chaining"], + "introBox": { + "headline": "How it works", + "text": [ + "This pattern demonstrates prompt chaining with AWS Lambda durable functions. Each LLM call is checkpointed, enabling automatic retry on failure and deterministic replay.", + "The example generates a joke about a topic, then uses a second LLM call to review and rate the joke.", + "Benefits: Expensive retries are avoided by resuming from the last checkpoint. 
Non-deterministic LLM outputs become deterministic through replay. Failed workflows don't restart from scratch." + ] + }, + "gitHub": { + "template": { + "repoURL": "https://github.com/aws-samples/sample-ai-workflows-in-aws-lambda-durable-functions" + } + }, + "snippets": [ + { + "title": "Runtimes", + "codeTabs": [ + { + "id": "Python", + "title": "Usage Example with Python:", + "description": "Prompt chaining with Lambda durable functions using Python.", + "snippets": [ + { + "snippetPath": "example.py", + "language": "py" + } + ] + }, + { + "id": "TypeScript", + "title": "Usage Example with TypeScript:", + "description": "Prompt chaining with Lambda durable functions using TypeScript.", + "snippets": [ + { + "snippetPath": "example.ts", + "language": "ts" + } + ] + } + ] + } + ], + "resources": { + "bullets": [ + { + "text": "AWS Lambda durable functions", + "link": "https://aws.amazon.com/lambda/lambda-durable-functions/" + } + ] + }, + "authors": [ + { + "headline": "Presented by Connor Kirkpatrick", + "name": "Connor Kirkpatrick", + "bio": "Solution Engineer at AWS", + "linkedin": "connorkirkpatrick" + } + ] +} diff --git a/lambda-durable-ai-structured-output/example.py b/lambda-durable-ai-structured-output/example.py new file mode 100644 index 0000000..f373892 --- /dev/null +++ b/lambda-durable-ai-structured-output/example.py @@ -0,0 +1,33 @@ +import json +import re + +from aws_durable_execution_sdk_python import DurableContext, durable_execution +from pydantic import BaseModel + +from utils.converse import converse + +MODEL_ID = "us.amazon.nova-lite-v1:0" + + +class ExtractedContact(BaseModel): + name: str + email: str + company: str + + +def extract_contact(text: str) -> dict: + raw = converse( + MODEL_ID, + f'Extract contact info as JSON with keys "name", "email", "company": {text}', + ) + match = re.search(r"\{[^}]+\}", raw) + if not match: + raise ValueError("No JSON found in response") + data = json.loads(match.group()) + return 
ExtractedContact(**data).model_dump() + + +@durable_execution +def handler(event: dict, context: DurableContext): + text = event.get("text", "John Smith from Acme Corp, email: john@acme.com") + return context.step(lambda _: extract_contact(text), "extract") diff --git a/lambda-durable-ai-structured-output/example.ts b/lambda-durable-ai-structured-output/example.ts new file mode 100644 index 0000000..aebbf9c --- /dev/null +++ b/lambda-durable-ai-structured-output/example.ts @@ -0,0 +1,31 @@ +import { + type DurableContext, + withDurableExecution, +} from "@aws/durable-execution-sdk-js"; +import { z } from "zod"; +import { converse } from "./utils/converse.js"; + +const MODEL_ID = "us.amazon.nova-lite-v1:0"; + +const ExtractedContact = z.object({ + name: z.string(), + email: z.string(), + company: z.string(), +}); + +async function extractContact(text: string) { + const raw = await converse( + MODEL_ID, + `Extract contact info as JSON with keys "name", "email", "company": ${text}` + ); + const match = raw.match(/\{[^}]+\}/); + if (!match) throw new Error("No JSON found in response"); + return ExtractedContact.parse(JSON.parse(match[0])); +} + +export const handler = withDurableExecution( + async (event: { text?: string }, context: DurableContext) => { + const text = event.text ?? 
"John Smith from Acme Corp, email: john@acme.com"; + return await context.step("extract", () => extractContact(text)); + } +); diff --git a/lambda-durable-ai-structured-output/snippet-data.json b/lambda-durable-ai-structured-output/snippet-data.json new file mode 100644 index 0000000..da46f0c --- /dev/null +++ b/lambda-durable-ai-structured-output/snippet-data.json @@ -0,0 +1,73 @@ +{ + "title": "Generating Structured Output from LLMs with Lambda Durable Functions", + "description": "Get JSON output from an LLM and validate it against a schema using AWS Lambda durable functions", + "type": "Integration", + "services": ["lambda", "bedrock"], + "languages": ["Python", "TypeScript"], + "tags": [ + "ai", + "durable-functions", + "bedrock", + "genai", + "structured-output", + "validation" + ], + "introBox": { + "headline": "How it works", + "text": [ + "This pattern demonstrates structured output extraction with AWS Lambda durable functions. The LLM response is validated against a schema with automatic retry on validation errors.", + "The example uses an LLM to extract contact information from unstructured text and validates it using Pydantic (Python) or Zod (TypeScript).", + "Benefits: Automatic retry on validation failures without manual error handling. Schema enforcement ensures data quality. Checkpointing prevents re-extraction on transient failures." 
+ ] + }, + "gitHub": { + "template": { + "repoURL": "https://github.com/aws-samples/sample-ai-workflows-in-aws-lambda-durable-functions" + } + }, + "snippets": [ + { + "title": "Runtimes", + "codeTabs": [ + { + "id": "Python", + "title": "Usage Example with Python:", + "description": "Structured output with Lambda durable functions using Python and Pydantic.", + "snippets": [ + { + "snippetPath": "example.py", + "language": "py" + } + ] + }, + { + "id": "TypeScript", + "title": "Usage Example with TypeScript:", + "description": "Structured output with Lambda durable functions using TypeScript and Zod.", + "snippets": [ + { + "snippetPath": "example.ts", + "language": "ts" + } + ] + } + ] + } + ], + "resources": { + "bullets": [ + { + "text": "AWS Lambda durable functions", + "link": "https://aws.amazon.com/lambda/lambda-durable-functions/" + } + ] + }, + "authors": [ + { + "headline": "Presented by Connor Kirkpatrick", + "name": "Connor Kirkpatrick", + "bio": "Solution Engineer at AWS", + "linkedin": "connorkirkpatrick" + } + ] +} From 592d110ad8219125fa58690a72e4c42a5df7d841 Mon Sep 17 00:00:00 2001 From: Connor Kirkpatrick Date: Wed, 10 Dec 2025 16:10:43 +0000 Subject: [PATCH 2/5] Correct naming --- lambda-durable-ai-agent-with-tools/snippet-data.json | 2 +- lambda-durable-ai-human-review/snippet-data.json | 2 +- lambda-durable-ai-llm-as-judge/snippet-data.json | 2 +- lambda-durable-ai-parallel-invocation/snippet-data.json | 2 +- lambda-durable-ai-prompt-chaining/snippet-data.json | 2 +- lambda-durable-ai-structured-output/snippet-data.json | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/lambda-durable-ai-agent-with-tools/snippet-data.json b/lambda-durable-ai-agent-with-tools/snippet-data.json index 8a4de06..c1b0bd1 100644 --- a/lambda-durable-ai-agent-with-tools/snippet-data.json +++ b/lambda-durable-ai-agent-with-tools/snippet-data.json @@ -1,5 +1,5 @@ { - "title": "Agent with Tools using Lambda Durable Functions", + "title": "Agent with 
Tools using AWS Lambda durable functions", "description": "Agentic loop where the LLM can call tools, including tools that suspend for human input, using AWS Lambda durable functions", "type": "Integration", "services": ["lambda", "bedrock"], diff --git a/lambda-durable-ai-human-review/snippet-data.json b/lambda-durable-ai-human-review/snippet-data.json index f28785c..2b29258 100644 --- a/lambda-durable-ai-human-review/snippet-data.json +++ b/lambda-durable-ai-human-review/snippet-data.json @@ -1,5 +1,5 @@ { - "title": "Human Review with Lambda Durable Functions", + "title": "Human Review with AWS Lambda durable functions", "description": "Pause AI workflow for human approval before continuing using AWS Lambda durable functions", "type": "Integration", "services": ["lambda", "bedrock"], diff --git a/lambda-durable-ai-llm-as-judge/snippet-data.json b/lambda-durable-ai-llm-as-judge/snippet-data.json index 56e1549..eb774d2 100644 --- a/lambda-durable-ai-llm-as-judge/snippet-data.json +++ b/lambda-durable-ai-llm-as-judge/snippet-data.json @@ -1,5 +1,5 @@ { - "title": "LLM as Judge with Lambda Durable Functions", + "title": "LLM as Judge with AWS Lambda durable functions", "description": "Get multiple AI responses in parallel, then use another LLM to pick the best one using AWS Lambda durable functions", "type": "Integration", "services": ["lambda", "bedrock"], diff --git a/lambda-durable-ai-parallel-invocation/snippet-data.json b/lambda-durable-ai-parallel-invocation/snippet-data.json index 6f5f596..70a63af 100644 --- a/lambda-durable-ai-parallel-invocation/snippet-data.json +++ b/lambda-durable-ai-parallel-invocation/snippet-data.json @@ -1,5 +1,5 @@ { - "title": "Parallel Invocation with Lambda Durable Functions", + "title": "Parallel Invocation with AWS Lambda durable functions", "description": "Run multiple LLM prompts simultaneously with individual checkpointing using AWS Lambda durable functions", "type": "Integration", "services": ["lambda", "bedrock"], diff 
--git a/lambda-durable-ai-prompt-chaining/snippet-data.json b/lambda-durable-ai-prompt-chaining/snippet-data.json index 1155c9e..291af7b 100644 --- a/lambda-durable-ai-prompt-chaining/snippet-data.json +++ b/lambda-durable-ai-prompt-chaining/snippet-data.json @@ -1,5 +1,5 @@ { - "title": "Prompt Chaining with Lambda Durable Functions", + "title": "Prompt Chaining with AWS Lambda durable functions", "description": "Sequential LLM calls where each step builds on the previous one using AWS Lambda durable functions", "type": "Integration", "services": ["lambda", "bedrock"], diff --git a/lambda-durable-ai-structured-output/snippet-data.json b/lambda-durable-ai-structured-output/snippet-data.json index da46f0c..4a8fc03 100644 --- a/lambda-durable-ai-structured-output/snippet-data.json +++ b/lambda-durable-ai-structured-output/snippet-data.json @@ -1,5 +1,5 @@ { - "title": "Generating Structured Output from LLMs with Lambda Durable Functions", + "title": "Generating Structured Output from LLMs with AWS Lambda durable functions", "description": "Get JSON output from an LLM and validate it against a schema using AWS Lambda durable functions", "type": "Integration", "services": ["lambda", "bedrock"], From 69ac97f69b1d829267f899a77a1c1786c85f194d Mon Sep 17 00:00:00 2001 From: Connor Kirkpatrick Date: Wed, 10 Dec 2025 16:22:46 +0000 Subject: [PATCH 3/5] Update TS examples --- lambda-durable-ai-human-review/example.ts | 16 +++++++++++++++- lambda-durable-ai-llm-as-judge/example.ts | 16 +++++++++++++++- lambda-durable-ai-parallel-invocation/example.ts | 16 +++++++++++++++- lambda-durable-ai-prompt-chaining/example.ts | 16 +++++++++++++++- lambda-durable-ai-structured-output/example.ts | 16 +++++++++++++++- 5 files changed, 75 insertions(+), 5 deletions(-) diff --git a/lambda-durable-ai-human-review/example.ts b/lambda-durable-ai-human-review/example.ts index 1787e73..93a5915 100644 --- a/lambda-durable-ai-human-review/example.ts +++ b/lambda-durable-ai-human-review/example.ts 
@@ -2,10 +2,24 @@ import { type DurableContext, withDurableExecution, } from "@aws/durable-execution-sdk-js"; +import { + BedrockRuntimeClient, + ConverseCommand, +} from "@aws-sdk/client-bedrock-runtime"; import { z } from "zod"; -import { converse } from "./utils/converse.js"; const MODEL_ID = "us.amazon.nova-pro-v1:0"; +const bedrock = new BedrockRuntimeClient({}); + +async function converse(modelId: string, prompt: string): Promise<string> { + const response = await bedrock.send( + new ConverseCommand({ + modelId, + messages: [{ role: "user", content: [{ text: prompt }] }], + }), + ); + return response.output?.message?.content?.[0].text ?? ""; +} const ReviewResultSchema = z.object({ approved: z.boolean(), diff --git a/lambda-durable-ai-llm-as-judge/example.ts b/lambda-durable-ai-llm-as-judge/example.ts index 5f3b32e..d7468cf 100644 --- a/lambda-durable-ai-llm-as-judge/example.ts +++ b/lambda-durable-ai-llm-as-judge/example.ts @@ -2,9 +2,23 @@ import { type DurableContext, withDurableExecution, } from "@aws/durable-execution-sdk-js"; -import { converse } from "./utils/converse.js"; +import { + BedrockRuntimeClient, + ConverseCommand, +} from "@aws-sdk/client-bedrock-runtime"; const MODELS = ["us.amazon.nova-lite-v1:0", "us.amazon.nova-pro-v1:0"]; +const bedrock = new BedrockRuntimeClient({}); + +async function converse(modelId: string, prompt: string): Promise<string> { + const response = await bedrock.send( + new ConverseCommand({ + modelId, + messages: [{ role: "user", content: [{ text: prompt }] }], + }), + ); + return response.output?.message?.content?.[0].text ?? 
""; +} export const handler = withDurableExecution( async (event: { question?: string }, context: DurableContext) => { diff --git a/lambda-durable-ai-parallel-invocation/example.ts b/lambda-durable-ai-parallel-invocation/example.ts index 69f1927..752620d 100644 --- a/lambda-durable-ai-parallel-invocation/example.ts +++ b/lambda-durable-ai-parallel-invocation/example.ts @@ -2,9 +2,23 @@ import { type DurableContext, withDurableExecution, } from "@aws/durable-execution-sdk-js"; -import { converse } from "./utils/converse.js"; +import { + BedrockRuntimeClient, + ConverseCommand, +} from "@aws-sdk/client-bedrock-runtime"; const MODEL_ID = "us.amazon.nova-lite-v1:0"; +const bedrock = new BedrockRuntimeClient({}); + +async function converse(modelId: string, prompt: string): Promise { + const response = await bedrock.send( + new ConverseCommand({ + modelId, + messages: [{ role: "user", content: [{ text: prompt }] }], + }), + ); + return response.output?.message?.content?.[0].text ?? ""; +} const PROMPTS = [ "Explain the benefits of", diff --git a/lambda-durable-ai-prompt-chaining/example.ts b/lambda-durable-ai-prompt-chaining/example.ts index 57e0e8d..3976b46 100644 --- a/lambda-durable-ai-prompt-chaining/example.ts +++ b/lambda-durable-ai-prompt-chaining/example.ts @@ -2,9 +2,23 @@ import { type DurableContext, withDurableExecution, } from "@aws/durable-execution-sdk-js"; -import { converse } from "./utils/converse.js"; +import { + BedrockRuntimeClient, + ConverseCommand, +} from "@aws-sdk/client-bedrock-runtime"; const MODEL_ID = "us.amazon.nova-lite-v1:0"; +const bedrock = new BedrockRuntimeClient({}); + +async function converse(modelId: string, prompt: string): Promise { + const response = await bedrock.send( + new ConverseCommand({ + modelId, + messages: [{ role: "user", content: [{ text: prompt }] }], + }), + ); + return response.output?.message?.content?.[0].text ?? 
""; +} export const handler = withDurableExecution( async (event: { topic?: string }, context: DurableContext) => { diff --git a/lambda-durable-ai-structured-output/example.ts b/lambda-durable-ai-structured-output/example.ts index aebbf9c..cdf321a 100644 --- a/lambda-durable-ai-structured-output/example.ts +++ b/lambda-durable-ai-structured-output/example.ts @@ -2,10 +2,24 @@ import { type DurableContext, withDurableExecution, } from "@aws/durable-execution-sdk-js"; +import { + BedrockRuntimeClient, + ConverseCommand, +} from "@aws-sdk/client-bedrock-runtime"; import { z } from "zod"; -import { converse } from "./utils/converse.js"; const MODEL_ID = "us.amazon.nova-lite-v1:0"; +const bedrock = new BedrockRuntimeClient({}); + +async function converse(modelId: string, prompt: string): Promise { + const response = await bedrock.send( + new ConverseCommand({ + modelId, + messages: [{ role: "user", content: [{ text: prompt }] }], + }), + ); + return response.output?.message?.content?.[0].text ?? 
""; +} const ExtractedContact = z.object({ name: z.string(), From db9bea4093ef4c154ea50af5fda1f7abe131cd7c Mon Sep 17 00:00:00 2001 From: Connor Kirkpatrick Date: Wed, 10 Dec 2025 16:22:56 +0000 Subject: [PATCH 4/5] Update python examples --- lambda-durable-ai-human-review/example.py | 12 ++++++++++-- lambda-durable-ai-llm-as-judge/example.py | 12 ++++++++++-- lambda-durable-ai-parallel-invocation/example.py | 12 ++++++++++-- lambda-durable-ai-prompt-chaining/example.py | 12 ++++++++++-- lambda-durable-ai-structured-output/example.py | 12 ++++++++++-- 5 files changed, 50 insertions(+), 10 deletions(-) diff --git a/lambda-durable-ai-human-review/example.py b/lambda-durable-ai-human-review/example.py index 4a8ac0f..df09948 100644 --- a/lambda-durable-ai-human-review/example.py +++ b/lambda-durable-ai-human-review/example.py @@ -1,12 +1,20 @@ import json +import boto3 from aws_durable_execution_sdk_python import DurableContext, durable_execution from aws_durable_execution_sdk_python.config import Duration, WaitForCallbackConfig from pydantic import BaseModel -from utils.converse import converse - MODEL_ID = "us.amazon.nova-pro-v1:0" +bedrock = boto3.client("bedrock-runtime") + + +def converse(model_id: str, prompt: str) -> str: + response = bedrock.converse( + modelId=model_id, + messages=[{"role": "user", "content": [{"text": prompt}]}], + ) + return response["output"]["message"]["content"][0]["text"] class ReviewResult(BaseModel): diff --git a/lambda-durable-ai-llm-as-judge/example.py b/lambda-durable-ai-llm-as-judge/example.py index 041672f..7b91569 100644 --- a/lambda-durable-ai-llm-as-judge/example.py +++ b/lambda-durable-ai-llm-as-judge/example.py @@ -1,11 +1,19 @@ import json import re +import boto3 from aws_durable_execution_sdk_python import DurableContext, durable_execution -from utils.converse import converse - MODELS = ["us.amazon.nova-lite-v1:0", "us.amazon.nova-pro-v1:0"] +bedrock = boto3.client("bedrock-runtime") + + +def converse(model_id: str, prompt: 
str) -> str: + response = bedrock.converse( + modelId=model_id, + messages=[{"role": "user", "content": [{"text": prompt}]}], + ) + return response["output"]["message"]["content"][0]["text"] @durable_execution diff --git a/lambda-durable-ai-parallel-invocation/example.py b/lambda-durable-ai-parallel-invocation/example.py index 203f8f4..349f2af 100644 --- a/lambda-durable-ai-parallel-invocation/example.py +++ b/lambda-durable-ai-parallel-invocation/example.py @@ -1,8 +1,16 @@ +import boto3 from aws_durable_execution_sdk_python import DurableContext, durable_execution -from utils.converse import converse - MODEL_ID = "us.amazon.nova-lite-v1:0" +bedrock = boto3.client("bedrock-runtime") + + +def converse(model_id: str, prompt: str) -> str: + response = bedrock.converse( + modelId=model_id, + messages=[{"role": "user", "content": [{"text": prompt}]}], + ) + return response["output"]["message"]["content"][0]["text"] PROMPTS = [ "Explain the benefits of", diff --git a/lambda-durable-ai-prompt-chaining/example.py b/lambda-durable-ai-prompt-chaining/example.py index b745517..bc02772 100644 --- a/lambda-durable-ai-prompt-chaining/example.py +++ b/lambda-durable-ai-prompt-chaining/example.py @@ -1,8 +1,16 @@ +import boto3 from aws_durable_execution_sdk_python import DurableContext, durable_execution -from utils.converse import converse - MODEL_ID = "us.amazon.nova-lite-v1:0" +bedrock = boto3.client("bedrock-runtime") + + +def converse(model_id: str, prompt: str) -> str: + response = bedrock.converse( + modelId=model_id, + messages=[{"role": "user", "content": [{"text": prompt}]}], + ) + return response["output"]["message"]["content"][0]["text"] @durable_execution diff --git a/lambda-durable-ai-structured-output/example.py b/lambda-durable-ai-structured-output/example.py index f373892..58b7419 100644 --- a/lambda-durable-ai-structured-output/example.py +++ b/lambda-durable-ai-structured-output/example.py @@ -1,12 +1,20 @@ import json import re +import boto3 from 
aws_durable_execution_sdk_python import DurableContext, durable_execution from pydantic import BaseModel -from utils.converse import converse - MODEL_ID = "us.amazon.nova-lite-v1:0" +bedrock = boto3.client("bedrock-runtime") + + +def converse(model_id: str, prompt: str) -> str: + response = bedrock.converse( + modelId=model_id, + messages=[{"role": "user", "content": [{"text": prompt}]}], + ) + return response["output"]["message"]["content"][0]["text"] class ExtractedContact(BaseModel): From 2410f9be28910b8d0f37b8099342e139cff5e9c7 Mon Sep 17 00:00:00 2001 From: Connor Kirkpatrick Date: Wed, 10 Dec 2025 16:26:20 +0000 Subject: [PATCH 5/5] Improve snippet description --- lambda-durable-ai-prompt-chaining/snippet-data.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lambda-durable-ai-prompt-chaining/snippet-data.json b/lambda-durable-ai-prompt-chaining/snippet-data.json index 291af7b..204adcd 100644 --- a/lambda-durable-ai-prompt-chaining/snippet-data.json +++ b/lambda-durable-ai-prompt-chaining/snippet-data.json @@ -10,7 +10,7 @@ "text": [ "This pattern demonstrates prompt chaining with AWS Lambda durable functions. Each LLM call is checkpointed, enabling automatic retry on failure and deterministic replay.", "The example generates a joke about a topic, then uses a second LLM call to review and rate the joke.", - "Benefits: Expensive retries are avoided by resuming from the last checkpoint. Non-deterministic LLM outputs become deterministic through replay. Failed workflows don't restart from scratch." + "Benefits: Expensive retries are avoided by resuming from the last checkpoint. Replayed LLM calls return the same response each time. Failed workflows don't restart from scratch." ] }, "gitHub": {