From 43db084f5cf2e5588fd498b48c1952023dc4e09e Mon Sep 17 00:00:00 2001 From: Swapnil Godambe Date: Thu, 1 Jan 2026 17:21:24 +0530 Subject: [PATCH 01/15] adds crew ai --- ai-agents/crew-ai-actions.mdx | 9 + ai-agents/crew-ai-knowlege-agent-pdf.mdx | 88 ++++++ ai-agents/crew-ai-product-hunt-agent.mdx | 118 ++++++++ ai-agents/crew-ai-tools.mdx | 9 + ai-agents/crew-ai.mdx | 365 +++++++++++++++++++++++ 5 files changed, 589 insertions(+) create mode 100644 ai-agents/crew-ai-actions.mdx create mode 100644 ai-agents/crew-ai-knowlege-agent-pdf.mdx create mode 100644 ai-agents/crew-ai-product-hunt-agent.mdx create mode 100644 ai-agents/crew-ai-tools.mdx create mode 100644 ai-agents/crew-ai.mdx diff --git a/ai-agents/crew-ai-actions.mdx b/ai-agents/crew-ai-actions.mdx new file mode 100644 index 00000000..bb7ee7e1 --- /dev/null +++ b/ai-agents/crew-ai-actions.mdx @@ -0,0 +1,9 @@ +--- +title: "AI Agent Actions" +sidebarTitle: "Actions" +description: "Explore the various actions you can perform with your AI agent in CometChat." +--- + +import Actions from '/snippets/ai-agents/actions.mdx'; + + diff --git a/ai-agents/crew-ai-knowlege-agent-pdf.mdx b/ai-agents/crew-ai-knowlege-agent-pdf.mdx new file mode 100644 index 00000000..e10be2be --- /dev/null +++ b/ai-agents/crew-ai-knowlege-agent-pdf.mdx @@ -0,0 +1,88 @@ +--- +title: "PDF Knowledge Agent with CrewAI" +sidebarTitle: "PDF Knowledge Agent" +description: "Ingest PDFs, extract text, and let a CrewAI agent answer questions with citations while streaming to CometChat." +--- + +import { Steps, Step } from 'mintlify'; + +Turn PDF handbooks into an on-demand assistant. The agent ingests PDFs, retrieves relevant chunks, and answers with citations. + +*** + +## What you’ll build + +- A PDF ingestion routine that extracts text into `knowledge//`. +- CrewAI tools to ingest and retrieve PDF context. +- An agent that cites sources and only responds when invoked. + +*** + +## Prerequisites + +- CrewAI project from [crew-ai.mdx](/ai-agents/crew-ai) +- `pypdf` or `pdfplumber` installed for extraction (`uv add pypdf`) + +*** + +## Steps + + + + Convert PDFs into markdown/text and save under knowledge//. Store filenames for citations. + + + ingest_pdf: accept a file path/URL, extract text, save to knowledge folder. + retrieve_pdf_context: search extracted text for a query and return top chunks with source names. + + + Force a retrieval call before answering and append a “Sources” list. + + + Use the same NDJSON pipeline; include the namespace or PDF title in tool args for clarity. + + + +*** + +## Sample retrieval tool + +`src/crew_demo/tools/retrieve_pdf_context.py` + +```python +import json +from pathlib import Path +from crewai.tools import tool + + +@tool("retrieve_pdf_context") +def retrieve_pdf_context(query: str, namespace: str = "pdf") -> str: + """Retrieve text snippets from extracted PDFs.""" + base = Path(__file__).parent.parent.parent / "knowledge" / namespace + matches = [] + for path in base.glob("*.txt"): + content = path.read_text(encoding="utf-8") + if query.lower() in content.lower(): + matches.append({"source": path.name, "excerpt": content[:800]}) + return json.dumps({"matches": matches[:3], "namespace": namespace}) +``` + +Pair this with an ingestion script that turns PDFs into `.txt` files in the same namespace. + +*** + +## Agent prompt essentials + +- Always call `retrieve_pdf_context` first. +- Answer only from retrieved excerpts; if nothing is found, say so. +- Cite sources as `Sources: file-one.txt, file-two.txt`. 
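
***

## Sample ingestion tool

To pair with the retriever above, here is a minimal `ingest_pdf` sketch using `pypdf` from the prerequisites. It handles local files only and skips chunking—both assumptions you may want to revisit (add URL downloads or a text splitter for finer-grained retrieval).

`src/crew_demo/tools/ingest_pdf.py`

```python
import json
from pathlib import Path

from pypdf import PdfReader
from crewai.tools import tool


@tool("ingest_pdf")
def ingest_pdf(file_path: str, namespace: str = "pdf") -> str:
    """Extract text from a local PDF and save it under the knowledge namespace."""
    source = Path(file_path)
    if not source.is_file() or source.suffix.lower() != ".pdf":
        raise Exception(f"'{file_path}' is not an existing PDF file")

    base = Path(__file__).parent.parent.parent / "knowledge" / namespace
    base.mkdir(parents=True, exist_ok=True)

    # Join per-page text; pages without extractable text contribute empty strings
    reader = PdfReader(source)
    text = "\n\n".join(page.extract_text() or "" for page in reader.pages)

    target = base / f"{source.stem}.txt"
    target.write_text(text, encoding="utf-8")
    return json.dumps({
        "source": target.name,
        "pages": len(reader.pages),
        "namespace": namespace
    })
```

The `.txt` output lands exactly where `retrieve_pdf_context` looks, so the two tools compose without extra glue.
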
+ +*** + +## CometChat setup + +Provider: **CrewAI**, Agent ID: `pdf_agent`, Deployment URL: `/kickoff`. + +Suggested prompts: “@pdf_agent What does the onboarding guide say about PTO?”. + +--- diff --git a/ai-agents/crew-ai-product-hunt-agent.mdx b/ai-agents/crew-ai-product-hunt-agent.mdx new file mode 100644 index 00000000..ca5f752f --- /dev/null +++ b/ai-agents/crew-ai-product-hunt-agent.mdx @@ -0,0 +1,118 @@ +--- +title: "Build a Product Hunt Agent with CrewAI" +sidebarTitle: "Product Hunt Agent" +description: "Create a CrewAI agent that fetches Product Hunt posts, answers launch questions, and can trigger frontend actions like confetti." +--- + +import { Steps, Step } from 'mintlify'; + +Give your chats Product Hunt superpowers: search launches, surface top posts, and celebrate wins with a UI action. + +--- + +## What you’ll build + +- A CrewAI agent with tools to **get top posts**, **search**, and **trigger a frontend action** (confetti). +- A FastAPI `/kickoff` endpoint streaming NDJSON for CometChat. +- Optional static page or widget that maps the confetti tool to a UI handler. + +--- + +## Prerequisites + +- Node or Python hosting for your CrewAI service +- `OPENAI_API_KEY` for chat +- `PRODUCTHUNT_API_TOKEN` (GraphQL) for live data +- CometChat app + AI Agent entry + +--- + +## Steps + + + + Create tools for get_top_products, search_products, and get_top_by_timeframe. Require PRODUCTHUNT_API_TOKEN and validate inputs. + + + Include a simple action tool (see frontend actions guide) to trigger celebration in the UI. + + + Explain when to use each tool, how to summarize results in tables, and when to celebrate. + + + Reuse the NDJSON streaming pattern from crew-ai.mdx. + + + Provider = CrewAI, Agent ID = product_hunt, Deployment URL = your public endpoint. + + + +--- + +## Sample tool (top products) + +`src/crew_demo/tools/product_hunt.py` + +```python +import os, json, httpx +from crewai.tools import tool + + +PRODUCT_HUNT_API = "https://api.producthunt.com/v2/api/graphql" +HEADERS = lambda: {"Authorization": f"Bearer {os.getenv('PRODUCTHUNT_API_TOKEN')}"} + + +@tool("get_top_products") +def get_top_products(limit: int = 3) -> str: + """Fetch top Product Hunt posts by votes.""" + if not os.getenv("PRODUCTHUNT_API_TOKEN"): + raise Exception("PRODUCTHUNT_API_TOKEN not set") + + query = """ + query TopProducts($limit: Int!) { + posts(order: VOTES, first: $limit) { + edges { node { name tagline votesCount url } } + } + } + """ + resp = httpx.post(PRODUCT_HUNT_API, headers=HEADERS(), json={"query": query, "variables": {"limit": limit}}, timeout=10.0) + resp.raise_for_status() + data = resp.json()["data"]["posts"]["edges"] + posts = [{"name": edge["node"]["name"], "tagline": edge["node"]["tagline"], "votes": edge["node"]["votesCount"], "url": edge["node"]["url"]} for edge in data] + return json.dumps({"posts": posts}) +``` + +Register the tool and combine it with a confetti action in `crew.py`. + +--- + +## Agent configuration + +`src/crew_demo/config/agents.yaml` + +```yaml +product_hunt: + role: Product Hunt Launch Assistant + goal: Fetch Product Hunt data and celebrate launches + backstory: > + Use get_top_products for rankings and a search tool for queries. + Present answers as bullet points or compact tables. Fire confetti on wins when asked. +``` + +--- + +## Frontend handling + +- Map `tool_result` for the confetti action to your UI handler (Widget/React UI Kit). +- Clamp limits (e.g., max 10 posts) in the tool to avoid huge responses. 
+- Never expose API tokens to the client—keep calls server-side. + +--- + +## Troubleshooting + +- Empty posts: confirm `PRODUCTHUNT_API_TOKEN` and check rate limits. +- Confetti not firing: ensure the tool name matches your client handler mapping. +- Slow responses: cache popular queries or reduce `limit`. + +--- diff --git a/ai-agents/crew-ai-tools.mdx b/ai-agents/crew-ai-tools.mdx new file mode 100644 index 00000000..78f2f929 --- /dev/null +++ b/ai-agents/crew-ai-tools.mdx @@ -0,0 +1,9 @@ +--- +title: "AI Agent Tools" +sidebarTitle: "Tools" +description: "Explore the various tools you can use with your AI agent in CometChat." +--- + +import Tools from '/snippets/ai-agents/tools.mdx'; + + diff --git a/ai-agents/crew-ai.mdx b/ai-agents/crew-ai.mdx new file mode 100644 index 00000000..0887c5cd --- /dev/null +++ b/ai-agents/crew-ai.mdx @@ -0,0 +1,365 @@ +--- +title: "Create an AI Agent with CrewAI" +sidebarTitle: "Create AI Agent" +description: "Wire a CrewAI agent to CometChat, stream messages over NDJSON, and ship it through UI Kit Builder or the Chat Widget." +--- + +import { Steps, Step, CardGroup, Card } from 'mintlify'; + +## What you’ll build + +- A **CrewAI** project (Python + FastAPI) that streams NDJSON events CometChat can consume. +- A simple **weather tool** plus room to add your own tools. +- A **CometChat AI Agent** entry pointing to your CrewAI `/kickoff` endpoint. +- Optional **UI exports** via **UI Kit Builder** or the **Chat Widget**. + +--- + +## Prerequisites + +- Python **3.13.2+** with `uv` or `pip` +- CrewAI **v1.7.2** +- OpenAI API key (or Anthropic via environment) +- A CometChat app: **[Create App](https://app.cometchat.com/apps)** + +--- + +## Step 1 - Create your CometChat app + + + + Sign in at app.cometchat.com. Create a new app or open an existing one. + + + Note your App ID, Region, and Auth Key (needed if you export the Chat Widget later). + + + +--- + +## Step 2 - Scaffold a CrewAI project + +Use `uv` (fast Python package manager). 
Install it if needed: + +```bash +curl -LsSf https://astral.sh/uv/install.sh | sh +# or +pip install uv +``` + +Create a CrewAI project: + +```bash +uv tool run crewai create crew crew_demo +cd crew_demo +``` + +Update `pyproject.toml` with required dependencies: + +```toml +[project] +name = "crew-demo" +version = "0.1.0" +description = "CrewAI agent integrated with CometChat" +requires-python = ">=3.13" +dependencies = [ + "crewai[tools]==1.7.2", + "fastapi>=0.115.6", + "uvicorn>=0.34.0", + "httpx>=0.28.1", + "python-dotenv>=1.0.1", + "openai>=1.13.3" +] + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" +``` + +Install everything: + +```bash +uv sync +``` + +--- + +## Step 3 - Configure environment + +Create a `.env` in your project root: + +```bash +MODEL=gpt-4o-mini +OPENAI_API_KEY=your_openai_api_key_here +SERPER_API_KEY=your_serper_api_key_here # optional search +OPENWEATHER_API_KEY=your_openweather_api_key_here +PORT=8000 +HOST=0.0.0.0 +CREWAI_TRACING_ENABLED=false # disable for production +``` + +--- + +## Step 4 - Add a tool (weather) + +`src/crew_demo/tools/weather_tool.py` + +```python +import os, json +from datetime import datetime, timezone +import httpx +from crewai.tools import tool + + +@tool("get_weather") +def get_weather(location: str, unit: str = "fahrenheit") -> str: + """Return the current weather for a location as JSON.""" + api_key = os.getenv("OPENWEATHER_API_KEY") + if not api_key: + raise Exception("OPENWEATHER_API_KEY not set") + + params = {"q": location, "appid": api_key, "units": "imperial" if unit.lower() == "fahrenheit" else "metric"} + response = httpx.get("http://api.openweathermap.org/data/2.5/weather", params=params, timeout=10.0) + response.raise_for_status() + data = response.json() + + return json.dumps({ + "location": data["name"], + "country": data["sys"]["country"], + "temperature": round(data["main"]["temp"], 1), + "conditions": data["weather"][0]["description"], + "humidity": data["main"]["humidity"], + "wind_speed": data["wind"]["speed"], + "unit": unit, + "timestamp": datetime.now(tz=timezone.utc).isoformat() + }) +``` + +--- + +## Step 5 - Define the agent + task + +`src/crew_demo/config/agents.yaml` + +```yaml +assistant: + role: Friendly Conversational Assistant + goal: Help users with greetings and weather questions + backstory: > + Keep responses concise, use the get_weather tool for weather queries, + and only show the final answer (no thoughts or reasoning traces). +``` + +`src/crew_demo/config/tasks.yaml` + +```yaml +conversation_task: + description: > + Respond to the user's message: {user_message} + Previous conversation context: {conversation_history} + Use the weather tool when relevant; otherwise answer directly. 
+ expected_output: > + A helpful, natural response to the user's query + agent: assistant +``` + +--- + +## Step 6 - Wire up CrewAI + +`src/crew_demo/crew.py` + +```python +from typing import List +from crewai import Agent, Crew, Process, Task +from crewai.crew import EventListener +from crewai.agents.agent_builder.base_agent import BaseAgent +from crewai.project import CrewBase, agent, crew, task +from crewai_tools import SerperDevTool + +from crew_demo.tools.weather_tool import get_weather + +listener = EventListener() + + +@CrewBase +class CrewDemo(): + agents: List[BaseAgent] + tasks: List[Task] + + search_tool = SerperDevTool() + agents_config = "config/agents.yaml" + tasks_config = "config/tasks.yaml" + + @agent + def assistant(self) -> Agent: + return Agent( + config=self.agents_config["assistant"], # type: ignore[index] + tools=[self.search_tool, get_weather], + verbose=False, + memory=False, + ) + + @task + def conversation_task(self) -> Task: + return Task( + config=self.tasks_config["conversation_task"], # type: ignore[index] + markdown=True, + verbose=False, + ) + + @crew + def crew(self) -> Crew: + return Crew( + agents=self.agents, + tasks=self.tasks, + process=Process.sequential, + verbose=False, + stream=True, + ) +``` + +--- + +## Step 7 - Expose a FastAPI endpoint with NDJSON streaming + +`src/crew_demo/main.py` (excerpt) + +```python +import json, os, warnings +from datetime import datetime +from uuid import uuid4 +from typing import List, Optional + +import uvicorn +from fastapi import FastAPI +from fastapi.responses import StreamingResponse +from pydantic import BaseModel +from crewai.types.streaming import StreamChunk, StreamChunkType, CrewStreamingOutput + +from crew_demo.crew import CrewDemo + +warnings.filterwarnings("ignore", category=SyntaxWarning, module="pysbd") +app = FastAPI() + + +class Message(BaseModel): + role: str + content: str + + +class KickoffRequest(BaseModel): + messages: List[Message] + threadId: Optional[str] = None + runId: Optional[str] = None + + +def stream_crew(inputs: dict): + message_id = str(uuid4()) + yield json.dumps({"type": "text_start", "message_id": message_id}) + "\n" + + streaming: CrewStreamingOutput = CrewDemo().crew().kickoff(inputs=inputs) + buffer, final_started = "", False + + for chunk in streaming: + if not isinstance(chunk, StreamChunk): + continue + + if chunk.chunk_type == StreamChunkType.TOOL_CALL and chunk.tool_call: + tool_call_id = str(uuid4()) + yield json.dumps({"type": "tool_call_start", "tool_call_id": tool_call_id, "name": chunk.tool_call.tool_name, "parent_message_id": message_id}) + "\n" + yield json.dumps({"type": "tool_call_args", "tool_call_id": tool_call_id, "args": json.loads(chunk.tool_call.arguments) if isinstance(chunk.tool_call.arguments, str) else chunk.tool_call.arguments}) + "\n" + yield json.dumps({"type": "tool_call_end", "tool_call_id": tool_call_id, "name": chunk.tool_call.tool_name}) + "\n" + if getattr(chunk.tool_call, "result", None): + yield json.dumps({"type": "tool_result", "tool_call_id": tool_call_id, "result": chunk.tool_call.result}) + "\n" + continue + + if chunk.chunk_type == StreamChunkType.TEXT: + buffer += chunk.content + if not final_started and "Final Answer:" in buffer: + final_started = True + final_chunk = buffer.split("Final Answer:")[-1].strip() + if final_chunk: + yield json.dumps({"type": "text_delta", "message_id": message_id, "content": final_chunk}) + "\n" + buffer = "" + elif final_started: + yield json.dumps({"type": "text_delta", "message_id": message_id, 
"content": chunk.content}) + "\n" + + if not final_started and buffer.strip(): + yield json.dumps({"type": "text_delta", "message_id": message_id, "content": buffer.strip()}) + "\n" + + yield json.dumps({"type": "text_end", "message_id": message_id}) + "\n" + yield json.dumps({"type": "done"}) + "\n" + + +@app.post("/kickoff") +async def kickoff(request: KickoffRequest): + inputs = { + "user_message": request.messages[-1].content, + "conversation_history": "\n".join([f"{m.role}: {m.content}" for m in request.messages[:-1]]), + "current_year": str(datetime.now().year), + } + return StreamingResponse(stream_crew(inputs), media_type="application/x-ndjson") + + +def run(): + uvicorn.run(app, host=os.getenv("HOST", "0.0.0.0"), port=int(os.getenv("PORT", "8000"))) + + +if __name__ == "__main__": + run() +``` + +Events emitted follow CometChat’s NDJSON shape: + +- Text: `text_start`, `text_delta`, `text_end` +- Tools: `tool_call_start`, `tool_call_args`, `tool_call_end`, `tool_result` +- Control: `done`, `error` + +--- + +## Step 8 - Run & test locally + +```bash +uv run crewai run # or: uv run uvicorn crew_demo.main:app --reload +curl -N -X POST http://localhost:8000/kickoff \ + -H "Content-Type: application/json" \ + -d '{"messages":[{"role":"user","content":"Hello!"}]}' +``` + +You should see NDJSON events streaming in the terminal. + +--- + +## Step 9 - Connect in CometChat + +In the CometChat Dashboard → **AI Agents → Add Agent**: + +- **Provider**: CrewAI +- **Name**: e.g., `CrewAI Weather Assistant` +- **Deployment URL**: `https://your-domain.com/kickoff` +- (Optional) **Headers**: flat JSON for auth (e.g., `{ "Authorization": "Bearer " }`) +- (Optional) Greeting, intro message, suggested prompts + +Save and toggle the agent **ON**. CometChat will stream user messages to `/kickoff` and relay NDJSON responses back to the client. + +--- + +## Step 10 - Extend your Crew + +- **Add tools**: decorate new Python functions with `@tool("name")` and register them in `crew.py`. +- **Switch LLMs**: set `ANTHROPIC_API_KEY` + `LLM_MODEL=claude-3-5-sonnet-20241022` (CrewAI auto-detects providers). +- **Multi-agent**: define multiple agents in `agents.yaml`, map tasks, and set `process=Process.sequential` or `Process.hierarchical`. +- **Structured output**: add `output_json: true` to a task to enforce JSON replies. +- **Production**: run `uvicorn crew_demo.main:app --host 0.0.0.0 --port 8000 --workers 4`, enable HTTPS, auth headers, CORS, and rate limits. + +--- + +## Step 11 - Deploy & ship UI + + + } description="Embed / script" href="/ai-agents/chat-widget" horizontal /> + } href="https://www.cometchat.com/docs/ui-kit/react/ai-assistant-chat" horizontal>Pre Built UI Components + + +> The CrewAI agent you configured is included automatically in exported variants—no extra client code needed for basic chat. 
From c6ebef0b588a81927d04b0a90af6ad1e226edecb Mon Sep 17 00:00:00 2001 From: Swapnil Godambe Date: Thu, 1 Jan 2026 17:26:09 +0530 Subject: [PATCH 02/15] updates structure for crew ai --- ai-agents/crew-ai-knowledge-agent.mdx | 158 +++ ai-agents/crew-ai/crew-ai-agents-playbook.mdx | 73 + .../crew-ai/crew-ai-backend-tools-agent.mdx | 122 ++ ai-agents/crew-ai/crew-ai-chef-agent.mdx | 101 ++ .../crew-ai/crew-ai-coordinator-agent.mdx | 121 ++ .../crew-ai-frontend-actions-agent.mdx | 119 ++ .../crew-ai/crew-ai-group-chat-agent.mdx | 93 ++ ai-agents/crew-ai/crew-ai-handoff-agent.mdx | 106 ++ .../crew-ai-knowlege-agent-pdf.mdx | 0 .../crew-ai/crew-ai-orchestrator-agent.mdx | 81 ++ ai-agents/crew-ai/crew-ai.text | 1246 +++++++++++++++++ 11 files changed, 2220 insertions(+) create mode 100644 ai-agents/crew-ai-knowledge-agent.mdx create mode 100644 ai-agents/crew-ai/crew-ai-agents-playbook.mdx create mode 100644 ai-agents/crew-ai/crew-ai-backend-tools-agent.mdx create mode 100644 ai-agents/crew-ai/crew-ai-chef-agent.mdx create mode 100644 ai-agents/crew-ai/crew-ai-coordinator-agent.mdx create mode 100644 ai-agents/crew-ai/crew-ai-frontend-actions-agent.mdx create mode 100644 ai-agents/crew-ai/crew-ai-group-chat-agent.mdx create mode 100644 ai-agents/crew-ai/crew-ai-handoff-agent.mdx rename ai-agents/{ => crew-ai}/crew-ai-knowlege-agent-pdf.mdx (100%) create mode 100644 ai-agents/crew-ai/crew-ai-orchestrator-agent.mdx create mode 100644 ai-agents/crew-ai/crew-ai.text diff --git a/ai-agents/crew-ai-knowledge-agent.mdx b/ai-agents/crew-ai-knowledge-agent.mdx new file mode 100644 index 00000000..c99f6bdc --- /dev/null +++ b/ai-agents/crew-ai-knowledge-agent.mdx @@ -0,0 +1,158 @@ +--- +title: "Build Your Knowledge Agent with CrewAI" +sidebarTitle: "Knowledge Agent" +description: "Create a CrewAI knowledge agent that answers from your docs, streams NDJSON to CometChat, and cites sources." +--- + +import { Steps, Step } from 'mintlify'; + +Imagine an agent that only answers when asked, pulls context from your docs, and responds with concise, cited answers right inside chat. + +*** + +## What you’ll build + +- A **CrewAI** agent scoped to documentation questions. +- A lightweight **ingest + retrieve** flow that reads files from `knowledge/`. +- A **FastAPI `/kickoff`** endpoint that streams NDJSON events CometChat consumes. +- A **CometChat AI Agent** entry pointing at your deployment. + +*** + +## Prerequisites + +- CrewAI project (see [Create an AI Agent with CrewAI](/ai-agents/crew-ai)) +- Python 3.13.2+, `uv` or `pip` +- `OPENAI_API_KEY` in `.env` +- A CometChat app + +*** + +## How it works + +- **Ingest**: drop markdown/text/PDF summaries into `knowledge//`. +- **Retrieve**: a CrewAI tool scans that folder, scores snippets, and returns top matches with filenames for citations. +- **Answer**: the agent replies only when explicitly mentioned (`@agent`) and always cites sources. +- **Stream**: `/kickoff` emits NDJSON events (`text_*`, `tool_*`, `done`) that CometChat renders in real time. + +*** + +## Steps + + + + Create knowledge/default (or any namespace) and add markdown/text files. You can sync files from your CMS or build a simple CLI to keep this folder updated. + + + Implement a CrewAI tool that loads files from the namespace, extracts top snippets, and returns JSON with content, source, and optional score. + + + Configure the agent to respond only when mentioned and to always cite sources. Keep the backstory strict about using the retriever tool first. 
+ + + Reuse the FastAPI NDJSON stream from crew-ai.mdx. Pass namespace via tool params if you support multiple doc sets. + + + Dashboard → AI Agents → Provider = CrewAI, Agent ID = knowledge, Deployment URL = your public /kickoff. + + + +*** + +## Sample retriever tool + +`src/crew_demo/tools/docs_retriever.py` + +```python +import json +from pathlib import Path +from typing import List, Dict +from crewai.tools import tool + + +def _load_documents(namespace: str) -> List[Dict[str, str]]: + base = Path(__file__).parent.parent.parent / "knowledge" / namespace + docs = [] + for path in base.rglob("*.md"): + try: + docs.append({"source": path.name, "content": path.read_text(encoding="utf-8")}) + except Exception: + continue + return docs + + +@tool("search_docs") +def search_docs(query: str, namespace: str = "default", limit: int = 3) -> str: + """Return top matching snippets from knowledge/.""" + docs = _load_documents(namespace) + scored = [] + for doc in docs: + if query.lower() in doc["content"].lower(): + scored.append({"source": doc["source"], "excerpt": doc["content"][:800]}) + top = scored[:limit] if scored else [] + return json.dumps({"matches": top, "namespace": namespace}) +``` + +*** + +## Agent configuration + +`src/crew_demo/config/agents.yaml` + +```yaml +knowledge: + role: Knowledge Retrieval Specialist + goal: Answer questions using retrieved docs only + backstory: > + Respond only when mentioned (e.g., @agent). + Always call the search_docs tool first, then compose a concise answer with a Sources list. + Never reveal internal reasoning or tool output verbatim. +``` + +`src/crew_demo/config/tasks.yaml` + +```yaml +knowledge_task: + description: > + Use search_docs to find relevant context for: {user_message} + Namespace: {namespace} + expected_output: > + A short answer plus a "Sources:" list. + agent: knowledge +``` + +Register the tool in `crew.py`: + +```python +from crew_demo.tools.docs_retriever import search_docs + +@agent +def knowledge(self) -> Agent: + return Agent( + config=self.agents_config["knowledge"], # type: ignore[index] + tools=[search_docs], + verbose=False, + memory=False, + ) +``` + +*** + +## CometChat setup + + + Open CometChat Dashboard → AI Agents. + Provider = CrewAI, Agent ID = knowledge, Deployment URL = your /kickoff. + Add greeting/intro and suggested prompts like “@agent What’s our refund policy?” + Save and toggle the agent ON. + + +*** + +## Tips + +- Add basic file filters (size/type) before ingesting to keep retrieval fast. +- If you need embeddings/vector search, swap the retriever implementation; CometChat only expects the NDJSON stream, not the retrieval method. +- Use `output_json: true` on the task if you want fully structured answers. + +--- diff --git a/ai-agents/crew-ai/crew-ai-agents-playbook.mdx b/ai-agents/crew-ai/crew-ai-agents-playbook.mdx new file mode 100644 index 00000000..dd6708a6 --- /dev/null +++ b/ai-agents/crew-ai/crew-ai-agents-playbook.mdx @@ -0,0 +1,73 @@ +--- +title: "CrewAI Agents Playbook" +description: "Guides for six core CrewAI agents: Knowledge Retrieval, Frontend Action, Backend Tool, Handoff, Relay/Coordinator, and Group Chat." +--- + +# Knowledge Retrieval Agent + +Build a retrieval-first agent that answers questions from your docs with citations. + + + Build & integrate the knowledge retrieval agent. + + +--- + +# Frontend Actions Agent + +Emit structured UI actions (navigate, openModal, toggleTheme) that your frontend executes safely. + + + Return safe UI actions from the agent. 
+ + +--- + +# Backend Tools Agent + +Let the agent call backend tools or APIs (weather, CRM, ticketing) and stream results back. + + + Wire backend tools & external APIs. + + +--- + +# Human Handoff Agent + +Escalate gracefully when a human is needed by invoking a `handoff` tool with target + reason. + + + Escalate to humans with context. + + +--- + +# Multi-agent Orchestration (Coordinator) Agent + +Fan out to multiple sub-agents (billing, support, research) and return a consolidated answer. + + + Coordinate across specialized agents. + + +--- + +# Group Chat Agent + +An agent that lives in group chats and only responds when explicitly mentioned (e.g., `@agent`). + + + Mention-aware agent for group rooms. + + +--- + +## Next Steps + +* Combine these agents inside one CrewAI project. +* Deploy to your preferred host and connect them via **CometChat Dashboard → AI Agents**. +* Layer guardrails, retrieval, and workflows as needed. +* Share the playbook with your team so they can copy-paste working agent setups. + +--- diff --git a/ai-agents/crew-ai/crew-ai-backend-tools-agent.mdx b/ai-agents/crew-ai/crew-ai-backend-tools-agent.mdx new file mode 100644 index 00000000..94324bf6 --- /dev/null +++ b/ai-agents/crew-ai/crew-ai-backend-tools-agent.mdx @@ -0,0 +1,122 @@ +--- +title: "Backend Tools Agent with CrewAI" +sidebarTitle: "Backend Tools Agent" +description: "Let a CrewAI agent call backend APIs (CRM, weather, ticketing) and stream results to CometChat via NDJSON." +--- + +import { Steps, Step } from 'mintlify'; + +Put your APIs behind controlled tools so the agent can fetch data or perform actions while CometChat streams results live. + +*** + +## What you’ll build + +- A CrewAI agent configured with one or more **backend tools**. +- Tool functions that validate inputs, call external APIs, and raise exceptions on failure. +- A `/kickoff` endpoint that surfaces tool call events to CometChat. + +*** + +## Prerequisites + +- CrewAI project from [crew-ai.mdx](/ai-agents/crew-ai) +- API keys for any services you call (store them in `.env`) + +*** + +## Steps + + + + Keep each tool focused (single responsibility) and add clear docstrings so the LLM knows when to call them. + + + Check for required fields and raise exceptions for bad data—don’t return error strings. + + + Add tools to the agent in crew.py and set process=Process.sequential or another flow as needed. + + + Reuse the NDJSON stream from crew-ai.mdx so CometChat can render tool progress. 
+ + + +*** + +## Sample tool: fetch deals from a CRM + +`src/crew_demo/tools/get_deals.py` + +```python +import os, httpx, json +from crewai.tools import tool + + +@tool("get_recent_deals") +def get_recent_deals(limit: int = 5) -> str: + """Fetch recent deals from the CRM.""" + api_key = os.getenv("CRM_API_KEY") + base_url = os.getenv("CRM_BASE_URL", "https://api.example-crm.com") + if not api_key: + raise Exception("CRM_API_KEY not set") + + resp = httpx.get(f"{base_url}/deals", params={"limit": limit}, headers={"Authorization": f"Bearer {api_key}"}, timeout=10.0) + resp.raise_for_status() + deals = resp.json().get("deals", []) + return json.dumps({"deals": deals[:limit]}) +``` + +Register in `crew.py`: + +```python +from crew_demo.tools.get_deals import get_recent_deals + +@agent +def backend(self) -> Agent: + return Agent( + config=self.agents_config["backend"], # type: ignore[index] + tools=[get_recent_deals], + verbose=False, + memory=False, + ) +``` + +*** + +## Agent/task configuration + +`src/crew_demo/config/agents.yaml` + +```yaml +backend: + role: Backend Integrations Agent + goal: Call approved APIs and summarize results + backstory: > + Always validate inputs, call the correct tool, and summarize results clearly. + Never expose raw errors—raise exceptions and let the server handle them. +``` + +`src/crew_demo/config/tasks.yaml` + +```yaml +backend_task: + description: > + Use the appropriate tool to answer: {user_message} + expected_output: > + A short summary plus any key fields (totals, status, links). + agent: backend +``` + +*** + +## Connect to CometChat + +- Provider: **CrewAI** +- Agent ID: `backend` +- Deployment URL: your public `/kickoff` +- Optional headers: `{ "Authorization": "Bearer " }` + +Ensure your FastAPI service returns NDJSON with tool events so users can see when an API call is running. + +--- diff --git a/ai-agents/crew-ai/crew-ai-chef-agent.mdx b/ai-agents/crew-ai/crew-ai-chef-agent.mdx new file mode 100644 index 00000000..94407416 --- /dev/null +++ b/ai-agents/crew-ai/crew-ai-chef-agent.mdx @@ -0,0 +1,101 @@ +--- +title: "Chef Agent with CrewAI" +sidebarTitle: "Chef Agent" +description: "Build a CrewAI recipe assistant that plans meals, checks pantry items, and can trigger frontend actions (e.g., show shopping list)." +--- + +import { Steps, Step } from 'mintlify'; + +Delight users with a culinary copilot that suggests recipes, checks pantry items via tools, and returns structured shopping lists. + +*** + +## What you’ll build + +- A CrewAI agent with cooking-focused instructions. +- Tools for pantry lookup and recipe generation. +- Optional frontend action that displays a shopping list in your UI. + +*** + +## Prerequisites + +- CrewAI project ([crew-ai.mdx](/ai-agents/crew-ai)) +- Any ingredient/recipe API keys if you use a third-party source + +*** + +## Steps + + + + Provide a tool to check ingredient availability (local DB or API). Return structured data for the agent. + + + Let the LLM craft recipes using the pantry results. Keep responses concise with steps + ingredient list. + + + Emit a UI action (see frontend actions guide) so your app can show a list or start a checkout flow. 
+ + + +*** + +## Sample tools + +`src/crew_demo/tools/pantry.py` + +```python +import json +from crewai.tools import tool + +PANTRY = {"eggs": 6, "milk": 1, "flour": 500} # grams or units + + +@tool("check_pantry") +def check_pantry(items: str) -> str: + """Return which requested items exist in the pantry.""" + requested = [i.strip().lower() for i in items.split(",") if i.strip()] + found = {item: PANTRY.get(item, 0) for item in requested} + return json.dumps({"items": found}) +``` + +`src/crew_demo/tools/shopping_list.py` + +```python +import json +from crewai.tools import tool + + +@tool("show_shopping_list") +def show_shopping_list(items: str) -> str: + """Return a shopping list action for the frontend.""" + return json.dumps({ + "action": "show_shopping_list", + "parameters": {"items": [i.strip() for i in items.split(",") if i.strip()]} + }) +``` + +*** + +## Agent configuration + +`src/crew_demo/config/agents.yaml` + +```yaml +chef: + role: Culinary Assistant + goal: Propose recipes users can cook with available ingredients + backstory: > + First check pantry items with check_pantry. + Suggest a recipe with steps and a short shopping list if needed. + If asked, trigger show_shopping_list with the missing items. +``` + +--- + +## CometChat setup + +Provider: **CrewAI**, Agent ID: `chef`, Deployment URL: `/kickoff`. Provide suggested prompts like “What can I make with eggs, milk, and flour?”. + +--- diff --git a/ai-agents/crew-ai/crew-ai-coordinator-agent.mdx b/ai-agents/crew-ai/crew-ai-coordinator-agent.mdx new file mode 100644 index 00000000..1a7f2a10 --- /dev/null +++ b/ai-agents/crew-ai/crew-ai-coordinator-agent.mdx @@ -0,0 +1,121 @@ +--- +title: "Coordinator / Relay Agent with CrewAI" +sidebarTitle: "Coordinator Agent" +description: "Create a CrewAI coordinator that fans out to specialized agents and merges their answers before streaming to CometChat." +--- + +import { Steps, Step } from 'mintlify'; + +Handle complex questions by routing to the right specialist (billing, support, research) and returning a single, well-structured answer. + +*** + +## What you’ll build + +- Multiple CrewAI agents (specialists) plus a **coordinator** agent. +- A workflow that sequences or hierarchically calls those agents. +- NDJSON streaming so CometChat shows progress as the coordinator works. + +*** + +## Prerequisites + +- CrewAI project ([crew-ai.mdx](/ai-agents/crew-ai)) +- Agent configs for each specialist (billing/support/research/etc.) + +*** + +## Steps + + + + Add agents like billing, support, and research with narrow goals and tools. + + + The coordinator inspects the user question, decides which specialists to call, and merges their outputs. + + + Use Process.sequential for simple fan-out or Process.hierarchical if you want dynamic branching. + + + Keep the existing `/kickoff` NDJSON stream; tool calls from sub-agents will appear in CometChat. + + + +*** + +## Example configuration + +`src/crew_demo/config/agents.yaml` + +```yaml +billing: + role: Billing Specialist + goal: Handle invoices and refunds + backstory: > + Use billing tools only. Escalate if payment methods need human approval. + +support: + role: Support Specialist + goal: Troubleshoot product issues + backstory: > + Ask clarifying questions and suggest next steps using available tools. + +coordinator: + role: Relay Coordinator + goal: Route the request to the right specialist and return a concise summary + backstory: > + Decide which specialist to invoke. Combine their findings into one clear response. 
+``` + +`src/crew_demo/crew.py` (excerpt) + +```python +from crew_demo.tools.get_deals import get_recent_deals + +@agent +def billing(self) -> Agent: + return Agent(config=self.agents_config["billing"], tools=[get_recent_deals], verbose=False) + +@agent +def support(self) -> Agent: + return Agent(config=self.agents_config["support"], tools=[], verbose=False) + +@agent +def coordinator(self) -> Agent: + return Agent(config=self.agents_config["coordinator"], tools=[], verbose=False) + +@crew +def crew(self) -> Crew: + return Crew( + agents=[self.coordinator(), self.billing(), self.support()], + tasks=self.tasks, + process=Process.sequential, # swap to Process.hierarchical for dynamic routing + verbose=False, + stream=True, + ) +``` + +`src/crew_demo/config/tasks.yaml` + +```yaml +coordination_task: + description: > + Decide which specialist should handle: {user_message} + Share a merged answer back to the user. + expected_output: > + A concise response summarizing each specialist's findings. + agent: coordinator +``` + +*** + +## CometChat setup + +- Provider: **CrewAI** +- Agent ID: `coordinator` +- Deployment URL: `/kickoff` + +Use suggested prompts like “Ask billing if invoice #123 is paid and summarize it” to exercise multi-agent flows. + +--- diff --git a/ai-agents/crew-ai/crew-ai-frontend-actions-agent.mdx b/ai-agents/crew-ai/crew-ai-frontend-actions-agent.mdx new file mode 100644 index 00000000..4147d01e --- /dev/null +++ b/ai-agents/crew-ai/crew-ai-frontend-actions-agent.mdx @@ -0,0 +1,119 @@ +--- +title: "Frontend Actions Agent with CrewAI" +sidebarTitle: "Frontend Actions Agent" +description: "Let a CrewAI agent emit safe UI actions (navigation, modals, confetti) that your frontend executes when running inside CometChat." +--- + +import { Steps, Step } from 'mintlify'; + +Give your chat experience UI superpowers: the agent replies with structured actions your frontend can run (e.g., open a product, fire confetti, toggle a theme). + +*** + +## What you’ll build + +- A CrewAI agent that returns **JSON actions** instead of calling backend APIs. +- A tool definition that keeps action payloads predictable. +- A `/kickoff` stream that includes `tool_*` events so the client can map actions. +- A CometChat AI Agent entry that routes traffic to this CrewAI service. + +*** + +## Prerequisites + +- CrewAI project + FastAPI stream from [crew-ai.mdx](/ai-agents/crew-ai) +- Frontend capable of handling action payloads (UI Kit export or custom UI) +- Optional: `canvas-confetti` or your own action handlers + +*** + +## Steps + + + + Decide on a small set of actions your UI will accept (e.g., open_product, show_confetti, toggle_theme). + + + Return structured JSON with action name and parameters. Keep the schema narrow to avoid unsafe payloads. + + + In the backstory, explain when to trigger each action and to keep text responses short when actions are returned. + + + In your widget or UI Kit export, map `tool_call_*` + `tool_result` events to actual UI functions (e.g., fire confetti, navigate). 
+ + + +*** + +## Sample action tool + +`src/crew_demo/tools/confetti_action.py` + +```python +import json +from crewai.tools import tool + + +@tool("trigger_confetti") +def trigger_confetti(celebration: str = "default") -> str: + """Return a confetti action payload for the frontend.""" + return json.dumps({ + "action": "show_confetti", + "parameters": { + "preset": celebration, + "particleCount": 150, + "spread": 70 + } + }) +``` + +Register in `crew.py` and add to the agent’s tool list alongside any other actions. + +*** + +## Agent configuration + +`src/crew_demo/config/agents.yaml` + +```yaml +ui_assistant: + role: Frontend Action Agent + goal: Trigger approved UI actions and keep responses concise + backstory: > + When a user asks to celebrate, call trigger_confetti and include a short celebratory message. + When a user asks to open a product, return an action with name=open_product and include productId. + Never invent action names outside the approved list. Keep explanations brief. +``` + +`src/crew_demo/config/tasks.yaml` + +```yaml +ui_task: + description: > + Decide whether to run a UI action for: {user_message} + expected_output: > + Either a short text response or a tool-triggered action. + agent: ui_assistant +``` + +*** + +## Client-side handling + +- Listen for `tool_call_start`/`tool_call_args` to show “running action…” states. +- Parse `tool_result` JSON and map the `action` name to a handler. +- Keep handlers idempotent and validate parameters on the client before executing. + +*** + +## Connect to CometChat + +Use the same `/kickoff` endpoint as your CrewAI project. In Dashboard → AI Agents: + +- Provider: **CrewAI** +- Agent ID: `ui_assistant` +- Deployment URL: your `/kickoff` +- (Optional) Suggested prompts: “Launch confetti”, “Open the product page for sku-123” + +--- diff --git a/ai-agents/crew-ai/crew-ai-group-chat-agent.mdx b/ai-agents/crew-ai/crew-ai-group-chat-agent.mdx new file mode 100644 index 00000000..1c75f007 --- /dev/null +++ b/ai-agents/crew-ai/crew-ai-group-chat-agent.mdx @@ -0,0 +1,93 @@ +--- +title: "Group Chat Agent with CrewAI" +sidebarTitle: "Group Chat Agent" +description: "Create a CrewAI agent that lives in group rooms, responds only when mentioned, and streams NDJSON to CometChat." +--- + +import { Steps, Step } from 'mintlify'; + +Keep group chats clean: the agent replies only when explicitly tagged, includes mentions in its response, and keeps context short. + +*** + +## What you’ll build + +- A CrewAI agent that checks for mentions (e.g., `@agent`). +- Backstory rules to ignore chatter unless addressed. +- A `/kickoff` stream that returns concise answers and optional tool calls. + +*** + +## Prerequisites + +- CrewAI project and `/kickoff` endpoint from [crew-ai.mdx](/ai-agents/crew-ai) +- Client ability to pass conversation history and mention metadata + +*** + +## Steps + + + + Decide on a handle (e.g., @agent or @support) that users will type in group rooms. + + + Pass a boolean or mention list into the task input so the agent knows when it is addressed. + + + In the backstory, limit reply length and avoid re-quoting the entire thread. + + + Point your AI Agent to the CrewAI `/kickoff` URL and set a friendly display name/icon. + + + +*** + +## Agent configuration + +`src/crew_demo/config/agents.yaml` + +```yaml +group_agent: + role: Group Chat Assistant + goal: Reply only when mentioned and keep messages concise + backstory: > + Respond only if the latest message contains "@agent" (case-insensitive). + Keep replies under 4 sentences. 
If not mentioned, politely stay silent. +``` + +`src/crew_demo/config/tasks.yaml` + +```yaml +group_task: + description: > + If mentioned: respond to {user_message}. Otherwise return an empty reply. + Conversation: {conversation_history} + expected_output: > + A short answer or an empty string when not mentioned. + agent: group_agent +``` + +In your FastAPI kickoff, include a flag: + +```python +inputs = { + "user_message": request.messages[-1].content, + "conversation_history": "\n".join([...]), + "mentioned": "@agent" in request.messages[-1].content.lower(), +} +``` + +The task can check `mentioned` in its prompt and return an empty string to suppress replies. + +*** + +## CometChat setup + +- Provider: **CrewAI** +- Agent ID: `group_agent` +- Deployment URL: `/kickoff` +- Suggested prompts: “@agent what is our refund policy?” + +--- diff --git a/ai-agents/crew-ai/crew-ai-handoff-agent.mdx b/ai-agents/crew-ai/crew-ai-handoff-agent.mdx new file mode 100644 index 00000000..659be52b --- /dev/null +++ b/ai-agents/crew-ai/crew-ai-handoff-agent.mdx @@ -0,0 +1,106 @@ +--- +title: "Human Handoff Agent with CrewAI" +sidebarTitle: "Handoff Agent" +description: "Build a CrewAI agent that knows its limits and escalates to humans with a clear reason and target." +--- + +import { Steps, Step } from 'mintlify'; + +Teach the agent to say “I need a human” and emit a structured handoff payload CometChat can route to your team. + +*** + +## What you’ll build + +- A CrewAI tool that emits a `handoff` payload (reason + target + context). +- Agent instructions that decide when to escalate. +- NDJSON streaming so CometChat can display escalation status in real time. + +*** + +## Prerequisites + +- CrewAI project + `/kickoff` streaming endpoint ([crew-ai.mdx](/ai-agents/crew-ai)) +- A human support flow in your product (ticketing, live agent queue, etc.) + +*** + +## Steps + + + + Decide where to route: support, sales, billing, or a specific user ID. + + + Return a JSON payload with target, reason, and optional priority. Raise exceptions if required fields are missing. + + + In the backstory, describe clear thresholds for escalation (e.g., compliance, billing disputes, missing permissions). + + + When CometChat receives a `tool_result` for `handoff`, trigger your own UI/notification flow. + + + +*** + +## Sample handoff tool + +`src/crew_demo/tools/handoff.py` + +```python +import json +from crewai.tools import tool + + +@tool("handoff") +def handoff(target: str, reason: str, priority: str = "normal") -> str: + """Escalate to a human with routing details.""" + if not target or not reason: + raise Exception("target and reason are required for handoff") + return json.dumps({ + "action": "handoff", + "target": target, + "reason": reason, + "priority": priority + }) +``` + +Add to your agent’s tool list in `crew.py`. + +*** + +## Agent configuration + +`src/crew_demo/config/agents.yaml` + +```yaml +handoff: + role: Escalation Specialist + goal: Detect when a human is needed and route correctly + backstory: > + If the request involves account cancellation, payments, or legal topics, call the handoff tool. + Explain briefly why escalation is required. Keep user-facing text short and polite. +``` + +`src/crew_demo/config/tasks.yaml` + +```yaml +handoff_task: + description: > + Decide whether to answer or escalate: {user_message} + expected_output: > + If escalation is required, trigger the handoff tool with target + reason. 
+ agent: handoff +``` + +*** + +## CometChat integration + +- Provider: **CrewAI** +- Agent ID: `handoff` +- Deployment URL: your `/kickoff` +- Client: map the `handoff` tool payload to your routing layer (open a ticket, page on-call, or DM a user). + +--- diff --git a/ai-agents/crew-ai-knowlege-agent-pdf.mdx b/ai-agents/crew-ai/crew-ai-knowlege-agent-pdf.mdx similarity index 100% rename from ai-agents/crew-ai-knowlege-agent-pdf.mdx rename to ai-agents/crew-ai/crew-ai-knowlege-agent-pdf.mdx diff --git a/ai-agents/crew-ai/crew-ai-orchestrator-agent.mdx b/ai-agents/crew-ai/crew-ai-orchestrator-agent.mdx new file mode 100644 index 00000000..83bceda3 --- /dev/null +++ b/ai-agents/crew-ai/crew-ai-orchestrator-agent.mdx @@ -0,0 +1,81 @@ +--- +title: "Orchestrator Agent with CrewAI" +sidebarTitle: "Orchestrator Agent" +description: "Use CrewAI to orchestrate multiple agents and tools, enforce steps, and return a single streamed answer to CometChat." +--- + +import { Steps, Step } from 'mintlify'; + +When requests require multiple skills—research, calculations, approvals—an orchestrator coordinates the flow and keeps users updated through streaming events. + +*** + +## What you’ll build + +- An orchestrator agent that plans steps, invokes sub-agents/tools, and composes the final reply. +- Tasks that enforce ordering (e.g., research → summarize → verify). +- NDJSON streaming so users see progress. + +*** + +## Prerequisites + +- CrewAI multi-agent setup from [crew-ai.mdx](/ai-agents/crew-ai) +- At least two specialized agents or tools to orchestrate + +*** + +## Steps + + + + Define the stages your orchestrator must follow (e.g., gather context → call tool → summarize). + + + In the backstory, describe exactly when to call each sub-agent/tool and how to merge outputs. + + + Keep stream=True on the Crew so CometChat receives progress as steps complete. + + + Try multi-part requests to ensure the orchestrator doesn’t skip steps. + + + +*** + +## Example backstory + +`src/crew_demo/config/agents.yaml` + +```yaml +orchestrator: + role: Orchestration Lead + goal: Plan and execute the right sequence of tools/agents + backstory: > + Always outline a short plan, then execute tools or call specialists as needed. + Merge their findings into one concise answer. Do not expose raw plan text to the user. +``` + +`src/crew_demo/config/tasks.yaml` + +```yaml +orchestrator_task: + description: > + Plan the best sequence to solve: {user_message} + expected_output: > + A clear answer plus any key data. Mention which tools were used. + agent: orchestrator +``` + +Use `Process.hierarchical` if you want the orchestrator to decide dynamically which sub-agent to call next. + +*** + +## CometChat setup + +Provider: **CrewAI**, Agent ID: `orchestrator`, Deployment URL: `/kickoff`. + +Add suggested prompts like “Find our three latest deals, then draft a summary for the customer” to exercise orchestration. + +--- diff --git a/ai-agents/crew-ai/crew-ai.text b/ai-agents/crew-ai/crew-ai.text new file mode 100644 index 00000000..a0ae1f7e --- /dev/null +++ b/ai-agents/crew-ai/crew-ai.text @@ -0,0 +1,1246 @@ +Integrating CrewAI agent with CometChat + +CrewAI CometChat Integration Guide + +The CometChat Agentic Interface enables developers and product owners to connect their AI agents, built on platforms like Agno, AG2, Vercel AI SDK, Mastra, Rasa, CrewAI, or custom frameworks, directly with their end users through CometChat. 
+ +This approach follows a Bring Your Own Agent (BYOA) model where you build and host your own agent, and CometChat provides the secure communication layer, authentication, and real-time delivery between users and agents. + +This guide explains how to integrate a CrewAI agent with CometChat using Python and FastAPI. + +Overview + +The CometChat Agentic Platform operates over HTTP with streaming support. You expose an HTTP endpoint where CometChat sends user messages and receives responses from your agent. Your CrewAI agent handles the AI logic and tool execution, while your FastAPI server manages the HTTP layer and newline-delimited JSON (NDJSON) streaming. + +Once configured, your CrewAI Agent will be able to: + +Receive and respond to CometChat user messages + +Process inputs using CrewAI's agent framework + +Execute tools and functions (you can define any number of tools) + +Stream responses back in real-time to CometChat clients + +Prerequisites + +Before starting, ensure you have: + +Python 3.13.2 or higher installed + +A working knowledge of CrewAI (v1.7.2) + +A CometChat account (available at https://app.cometchat.com) + +An OpenAI API key + +(Optional) A Serper API key for web search capabilities + +(Optional) An OpenWeatherMap API key for weather information + +Installation + +This guide uses uv for fast, reliable Python package management. If you don't have uv installed: + +# Install uv +curl -LsSf https://astral.sh/uv/install.sh | sh + +# Or using pip +pip install uv + + +Create a New CrewAI Project + +# Create a new CrewAI crew +uv tool run crewai create crew crew_demo + +# Navigate to the project +cd crew_demo + + +Install Dependencies + +Add the required dependencies to your pyproject.toml: + +[project] +name = "crew-demo" +version = "0.1.0" +description = "CrewAI agent integrated with CometChat" +authors = [{name = "Your Name", email = "your.email@example.com"}] +requires-python = ">=3.13" +dependencies = [ + "crewai[tools]==1.7.2", + "fastapi>=0.115.6", + "uvicorn>=0.34.0", + "httpx>=0.28.1", + "python-dotenv>=1.0.1", + "openai>= 1.13.3" +] + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[tool.uv] +dev-dependencies = [] + + +Install all dependencies: + +uv sync + + +Project Structure + +After creation and setup, your project structure should look like this: + +crew-demo/ +│ +├── knowledge/ +│ └── user_preference.txt # Optional: Agent knowledge base +│ +├── src/ +│ └── crew_demo/ +│ ├── __init__.py +│ ├── config/ +│ │ ├── agents.yaml # Agent definitions +│ │ └── tasks.yaml # Task definitions +│ ├── crew.py # Crew setup and configuration +│ ├── main.py # FastAPI server with streaming +│ └── tools/ +│ ├── __init__.py +│ └── weather_tool.py # Custom weather tool +│ +├── tests/ # Test files +├── .env # Environment variables +├── pyproject.toml # Project dependencies +└── README.md +└── uv.lock + + +Step 1: Configure Environment Variables + +Create a .env file in the project root: + +# OpenAI Configuration +MODEL=gpt-4o-mini +OPENAI_API_KEY=your_openai_api_key_here + +# Tool API Keys +SERPER_API_KEY=your_serper_api_key_here +OPENWEATHER_API_KEY=your_openweather_api_key_here + +# Server Configuration +PORT=8000 +HOST=0.0.0.0 + +# CrewAI Configuration (disable tracing for production) +CREWAI_TRACING_ENABLED=false + + +Important Notes: + +Get your OpenAI API key from https://platform.openai.com/api-keys + +Get Serper API key (for web search) from https://serper.dev/ + +Get OpenWeatherMap API key from https://openweathermap.org/api + +Step 2: Create 
the Weather Tool + +Create src/crew_demo/tools/weather_tool.py: + +"""Weather tool for fetching current weather information""" + +import os +import json +from datetime import datetime, timezone + +import httpx +from crewai.tools import tool + + +@tool("get_weather") +def get_weather(location: str, unit: str = "fahrenheit") -> str: + """Get the current weather for a location + + Args: + location: The city name, e.g. San Francisco, Mumbai, London + unit: The temperature unit (celsius or fahrenheit) + + Returns: + Weather information as a JSON string with temperature, conditions, etc. + + Raises: + Exception: If the API call fails or location is not found + """ + + api_key = os.getenv("OPENWEATHER_API_KEY") + if not api_key: + raise Exception("OPENWEATHER_API_KEY environment variable not set") + + url = "http://api.openweathermap.org/data/2.5/weather" + params = { + "q": location, + "appid": api_key, + "units": "imperial" if unit.lower() == "fahrenheit" else "metric" + } + + try: + response = httpx.get(url, params=params, timeout=10.0) + response.raise_for_status() + + data = response.json() + + # Extract relevant information + result = { + "location": data["name"], + "country": data["sys"]["country"], + "temperature": round(data["main"]["temp"], 1), + "feels_like": round(data["main"]["feels_like"], 1), + "unit": unit, + "conditions": data["weather"][0]["description"], + "humidity": data["main"]["humidity"], + "wind_speed": data["wind"]["speed"], + "timestamp": datetime.now(tz=timezone.utc).isoformat() + } + + # Return clean JSON - NO error field when successful + return json.dumps(result) + + except httpx.HTTPStatusError as e: + if e.response.status_code == 404: + raise Exception( + f"Location '{location}' not found. " + "Please check the spelling and try again." + ) + else: + raise Exception( + f"Weather API returned error {e.response.status_code}. " + "Please try again later." + ) + + except httpx.TimeoutException: + raise Exception("Weather service timed out. Please try again.") + + except httpx.RequestError as e: + raise Exception(f"Could not connect to weather service: {str(e)}") + + except Exception as e: + raise Exception(f"Unexpected error fetching weather: {str(e)}") + + +Key Design Decisions: + +Raises exceptions for errors: Makes it clear to the agent when something fails + +Returns clean JSON on success: No "error" field needed + +Type hints and docstrings: Helps the LLM understand how to use the tool + +Step 3: Define Your Agent + +Create src/crew_demo/config/agents.yaml: + +assistant: + role: > + Friendly Conversational Assistant + goal: > + Help users with questions, greetings, and provide weather information when requested + backstory: > + You're a helpful, friendly assistant who responds naturally to greetings + and casual conversation. + + When users ask about weather, you MUST use the get_weather tool. After the tool + returns results, check if there is an "error" field in the response: + - If there's an "error" field, tell the user the weather could not be retrieved + - If there's NO "error" field, the tool succeeded - use the weather data to give + the user a helpful response with temperature, conditions, and other details + + IMPORTANT: When a tool returns data successfully (no error field), that means it + worked! Don't apologize or say you couldn't get the data. Use the data that was + returned. + + For general questions, you can check your training data or search the web if needed. 
+ + Always respond with ONLY your final answer - never show your thinking process, + thoughts, or tool observations to the user. The user should only see your final + response. + + +Important: The backstory explicitly tells the agent: + +How to interpret tool results (no error = success) + +When to use which tool + +To hide reasoning and show only the final answer + +Step 4: Define Your Task + +Create src/crew_demo/config/tasks.yaml: + +conversation_task: + description: > + Respond to the user's message: {user_message} + + Previous conversation context: {conversation_history} + + If it's a greeting, respond warmly. If asking about weather, use the weather tool. + For other questions, use search if needed. + expected_output: > + A helpful, natural response to the user's query + agent: assistant + + +Task Parameters: + +{user_message}: The current user message + +{conversation_history}: Previous messages for context + +Step 5: Set Up the Crew + +Create src/crew_demo/crew.py: + +"""CrewAI - Crew Configuration""" + +from typing import List + +from crewai_tools import SerperDevTool +from crewai.crew import EventListener +from crewai import Agent, Crew, Process, Task +from crewai.agents.agent_builder.base_agent import BaseAgent +from crewai.events.utils.console_formatter import ConsoleFormatter +from crewai.project import CrewBase, agent, crew, task + +# Import your custom weather tool +from crew_demo.tools.weather_tool import get_weather + +# Configure event listener for optional logging +my_listener = EventListener() +my_listener.formatter = ConsoleFormatter(verbose=False) + + +@CrewBase +class CrewDemo(): + """CrewDemo crew""" + + agents: List[BaseAgent] + tasks: List[Task] + + # Initialize tools + search_tool = SerperDevTool() + + # Config file paths + agents_config = "config/agents.yaml" + tasks_config = "config/tasks.yaml" + + @agent + def assistant(self) -> Agent: + """Conversational Assistant Agent""" + return Agent( + config=self.agents_config['assistant'], # type: ignore[index] + tools=[self.search_tool, get_weather], + verbose=False, # Set to True for debugging + memory=False # Stateless - we pass conversation history manually + ) + + @task + def conversation_task(self) -> Task: + """Conversation Task""" + return Task( + config=self.tasks_config['conversation_task'], # type: ignore[index] + markdown=True, + verbose=False, + ) + + @crew + def crew(self) -> Crew: + """Creates the CrewDemo crew""" + return Crew( + agents=self.agents, + tasks=self.tasks, + process=Process.sequential, + verbose=False, + stream=True, # Enable streaming + output_log_file=False, + tracing=False, + ) + + +Key Configuration: + +memory=False: We handle conversation history manually + +stream=True: Enables real-time streaming + +verbose=False: Hides internal logging (set True for debugging) + +Step 6: Create the FastAPI Server with Streaming + +Create src/crew_demo/main.py: + +"""Crew entry point with improved filtering""" + +import os +import json +import warnings +from typing import List, Optional +from datetime import datetime +from uuid import uuid4 + +from crewai.types.streaming import StreamChunk, StreamChunkType, CrewStreamingOutput + +import uvicorn +from pydantic import BaseModel + +from fastapi import FastAPI +from fastapi.responses import StreamingResponse + +from crew_demo.crew import CrewDemo + +warnings.filterwarnings("ignore", category=SyntaxWarning, module="pysbd") + +app = FastAPI() + +class Message(BaseModel): + """message fields""" + role: str + content: str + +class KickoffRequest(BaseModel): 
+ """Incoming request model""" + messages: List[Message] + threadId: Optional[str] = None + runId: Optional[str] = None + +def stream_crew(inputs: dict): + """Generator that yields AG-UI compatible JSON chunks with robust filtering.""" + + message_id = str(uuid4()) + + yield json.dumps({ + "type": "text_start", + "message_id": message_id + }) + "\n" + + streaming: CrewStreamingOutput = CrewDemo().crew().kickoff(inputs=inputs) + + # State tracking + buffer = "" + final_answer_started = False + final_answer_content = "" + thought_keywords = ["Thought:", "Action:", "Action Input:", "Observation:"] + + for chunk in streaming: + if not isinstance(chunk, StreamChunk): + continue + + # Handle tool calls + if chunk.chunk_type == StreamChunkType.TOOL_CALL and chunk.tool_call: + tool_call_id = str(uuid4()) + + yield json.dumps({ + "type": "tool_call_start", + "tool_call_id": tool_call_id, + "name": chunk.tool_call.tool_name, + "parent_message_id": message_id + }) + "\n" + + try: + args = json.loads( + chunk.tool_call.arguments + ) if isinstance(chunk.tool_call.arguments, str) else chunk.tool_call.arguments + + yield json.dumps({ + "type": "tool_call_args", + "tool_call_id": tool_call_id, + "args": args + }) + "\n" + except json.JSONDecodeError: + pass + + yield json.dumps({ + "type": "tool_call_end", + "tool_call_id": tool_call_id, + "name": chunk.tool_call.tool_name + }) + "\n" + + if hasattr(chunk.tool_call, 'result') and chunk.tool_call.result: + yield json.dumps({ + "type": "tool_result", + "tool_call_id": tool_call_id, + "result": chunk.tool_call.result + }) + "\n" + + continue + + # Handle text chunks + if chunk.chunk_type == StreamChunkType.TEXT: + buffer += chunk.content + + # Look for "Final Answer:" marker + if not final_answer_started and "Final Answer:" in buffer: + final_answer_started = True + + # Extract everything after "Final Answer:" + parts = buffer.split("Final Answer:")[-1].strip() + if len(parts) > 1: + final_answer_content = parts[1].strip() + + # Stream what we have so far (if any) + if final_answer_content: + yield json.dumps({ + "type": "text_delta", + "message_id": message_id, + "content": final_answer_content + }) + "\n" + + buffer = "" # Clear buffer + + elif final_answer_started: + # We're in final answer mode + # Check if we're hitting a new thought process + if any(keyword in chunk.content for keyword in thought_keywords): + # Stop streaming - we've hit the thought process again + final_answer_started = False + break + + # Stream this chunk + yield json.dumps({ + "type": "text_delta", + "message_id": message_id, + "content": chunk.content + }) + "\n" + + # sometimes, the "Final Answer:" text is not present, so stream all + if not final_answer_started: + yield json.dumps({ + "type": "text_delta", + "message_id": message_id, + "content": buffer.strip() + }) + "\n" + + yield json.dumps({ + "type": "text_end", + "message_id": message_id + }) + "\n" + + yield json.dumps({"type": "done"}) + "\n" + +@app.post("/kickoff") +async def kickoff(request: KickoffRequest): + """This kicks off the request""" + # print(json.dumps(request.model_dump(), indent=4)) + + inputs = { + 'user_message': request.messages[-1].content, + 'conversation_history': "\n".join( + [f"{m.role}: {m.content}" for m in request.messages[:-1]] + ), + 'current_year': str(datetime.now().year) + } + + return StreamingResponse( + stream_crew(inputs), + media_type="application/x-ndjson" + ) + +def run(): + """Start the app""" + port = int(os.getenv("PORT", "8000")) + host = os.getenv("HOST", "0.0.0.0") + + 
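    # Hand the app to uvicorn; HOST and PORT come from the environment (defaults 0.0.0.0:8000).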
uvicorn.run(app, host=host, port=port) + +if __name__ == "__main__": + run() + + + +Streaming Architecture: + +Tool events are emitted in real-time during the loop. + +Text response is emitted as soon as the chunks arrive. + +Clean separation between tool execution and final answer. + +No thought process leaked - only the final result is shown. + +Step 7: Test Locally + +Run the Server + +# Activate virtual environment (if using uv) +source .venv/bin/activate # On Unix/macOS +# or +.venv\Scripts\activate # On Windows + +# Run the server +crewai run + + +The server will start at http://localhost:8000 + +Test with cURL + +Test 1: Simple Greeting + +curl -X POST http://localhost:8000/kickoff \ + -H "Content-Type: application/json" \ + -d '{ + "thread_id": "thread_123", + "run_id": "run_456", + "messages": [ + { + "role": "user", + "content": "Hello!" + } + ] + }' + + +Expected Response: + +{"type": "text_start", "message_id": "abc-123"} +{"type": "text_delta", "message_id": "abc-123", "content": "Hello! How can I assist you today?"} +{"type": "text_end", "message_id": "abc-123"} +{"type": "done"} + + +Test 2: Weather Query + +curl -X POST http://localhost:8000/kickoff \ + -H "Content-Type: application/json" \ + -d '{ + "thread_id": "thread_123", + "run_id": "run_789", + "messages": [ + { + "role": "user", + "content": "What is the weather in Mumbai?" + } + ] + }' + + +Expected Response: + +{"type": "text_start", "message_id": "def-456"} +{"type": "tool_call_start", "tool_call_id": "tool_789", "name": "get_weather", "parent_message_id": "def-456"} +{"type": "tool_call_args", "tool_call_id": "tool_789", "args": {"location": "Mumbai", "unit": "fahrenheit"}} +{"type": "tool_call_end", "tool_call_id": "tool_789", "name": "get_weather"} +{"type": "tool_result", "tool_call_id": "tool_789", "result": "{\"location\": \"Mumbai\", \"temperature\": 84.2, ...}"} +{"type": "text_delta", "message_id": "def-456", "content": "The current weather in Mumbai is 84.2°F..."} +{"type": "text_end", "message_id": "def-456"} +{"type": "done"} + + +Test 3: Conversation Context + +curl -X POST http://localhost:8000/kickoff \ + -H "Content-Type: application/json" \ + -d '{ + "thread_id": "thread_123", + "run_id": "run_999", + "messages": [ + { + "role": "user", + "content": "What is the weather in Paris?" + }, + { + "role": "assistant", + "content": "The weather in Paris is 15°C and cloudy." + }, + { + "role": "user", + "content": "How about London?" + } + ] + }' + + +The agent will understand "London" refers to weather based on conversation context. + +Step 8: Configure CometChat Agent + +In the CometChat Dashboard: + +Go to your App. + +Navigate to: AI Agents → BYO Agents + +Click "Add Agent" to create a new CrewAI agent or edit the existing CrewAI agent, if already available. + +Fill in the following fields: + +Name: CrewAI Weather Assistant + +Deployment URL: https://yourdomain.com/kickoff + +Greeting: "Hello! I can help you with weather information and answer your questions." + +Headers (optional): Valid JSON with authorization tokens + +Example Header Configuration: + +{ + "Authorization": "Bearer YOUR_TOKEN" +} + + +Important: The JSON must be flat (width = 1). No nested structures. + +Save the configuration + +Understanding the Event Flow + +Request Flow + +User sends message in CometChat app + +CometChat forwards to your /kickoff endpoint. Please make sure the agent is publicly reachable. 
+ +{ + "thread_id": "unique_thread_id", + "run_id": "unique_run_id", + "messages": [ + {"role": "user", "content": "What's the weather like in Shanghai?"} + ] +} + + +Your agent processes and streams events + +CometChat delivers to end user in real-time + +Event Types + +Your CrewAI agent emits these events: + +Text Events: + +text_start: Beginning of assistant message + +text_delta: Chunks of text content + +text_end: End of assistant message + +Tool Events: + +tool_call_start: Tool invocation begins + +tool_call_args: Tool arguments + +tool_call_end: Tool execution complete + +tool_result: Tool execution result + +Control Events: + +done: Stream complete + +error: Error occurred + +Example Complete Flow + +User: "What's the weather in Tokyo?" + +Events Emitted: + +1. text_start (message begins) +2. tool_call_start (calling get_weather) +3. tool_call_args (location: Tokyo) +4. tool_call_end (tool finished) +5. tool_result (weather data) +6. text_delta (agent's response text) +7. text_end (message complete) +8. done (stream finished) + + +User Sees: + +🌤️ Calling get_weather... +📍 Location: Tokyo + +The current weather in Tokyo is 22°C +with clear skies. The humidity is 65% +and wind speed is 8 mph. + + +Customizing Your Agent + +Adding New Tools + +Create a new tool in src/crew_demo/tools/: + +# src/crew_demo/tools/calculator_tool.py +from crewai.tools import tool + +@tool("calculate") +def calculate(expression: str) -> str: + """Safely evaluate a mathematical expression + + Args: + expression: Mathematical expression like "2 + 2" or "10 * 5" + + Returns: + Result of the calculation + """ + try: + # Use a safe eval or math library + result = eval(expression, {"__builtins__": {}}, {}) + return str(result) + except Exception as e: + raise Exception(f"Calculation error: {str(e)}") + + +Register it in crew.py: + +from crew_demo.tools.calculator_tool import calculate + +@agent +def assistant(self) -> Agent: + return Agent( + config=self.agents_config['assistant'], + tools=[self.search_tool, get_weather, calculate], # Add new tool + verbose=False, + memory=False + ) + + +Using Different LLM Providers + +CrewAI supports multiple LLM providers. See CrewAI's LLM documentation. Update your .env: + +Anthropic Claude: + +# .env +ANTHROPIC_API_KEY=your_anthropic_key +LLM_MODEL=claude-3-5-sonnet-20241022 + + +Then in your agent configuration, CrewAI will automatically detect and use Anthropic. 
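If you prefer to pin the provider and model in code rather than rely on environment auto-detection, you can pass an `llm` object to the agent. A minimal sketch for the `assistant` agent in `crew.py`, assuming the `LLM` helper available in recent CrewAI releases (check your installed version):

```python
from crewai import Agent, LLM

# Explicit model pin; overrides env-based auto-detection (assumption: LLM helper is available).
claude = LLM(model="anthropic/claude-3-5-sonnet-20241022", temperature=0.2)

@agent
def assistant(self) -> Agent:
    return Agent(
        config=self.agents_config['assistant'],
        tools=[self.search_tool, get_weather],
        llm=claude,
        verbose=False,
        memory=False
    )
```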
+ +See CrewAI docs for more providers: https://docs.crewai.com/concepts/llms + +Multi-Agent Workflows + +Create multiple agents for complex workflows: + +# agents.yaml +researcher: + role: Research Specialist + goal: Find accurate information on the web + backstory: Expert at finding and verifying information + +writer: + role: Content Writer + goal: Create engaging, informative responses + backstory: Skilled at turning research into clear answers + + +# crew.py +@agent +def researcher(self) -> Agent: + return Agent( + config=self.agents_config['researcher'], + tools=[self.search_tool], + verbose=False + ) + +@agent +def writer(self) -> Agent: + return Agent( + config=self.agents_config['writer'], + tools=[], + verbose=False + ) + +@crew +def crew(self) -> Crew: + return Crew( + agents=[self.researcher(), self.writer()], + tasks=self.tasks, + process=Process.sequential, # Or Process.hierarchical + verbose=False, + stream=True + ) + + +Requirements for CometChat Compatibility + +Your CrewAI agent must: + +Expose an HTTP endpoint (e.g., /kickoff) that accepts POST requests + +Accept the request format: + +{ + "thread_id": str, + "run_id": str, + "messages": List[Message] +} + + +Stream responses using newline-delimited JSON (NDJSON) with these event types: + +text_start + +text_delta + +text_end + +tool_call_start + +tool_call_args + +tool_call_end + +tool_result + +done + +error + +Include proper media type: + +"Content-Type": "application/x-ndjson" + + +Beyond these requirements, you have complete flexibility in: + +Your agent's implementation + +Tool definitions and execution + +LLM provider choice + +Multi-agent orchestration + +Error handling strategies + +Troubleshooting + +Agent Not Responding + +Check: + +✅ Server is running: curl http://localhost:8000/ + +✅ Environment variables are set correctly + +✅ API keys are valid and have credits + +✅ Review server logs for errors + +Debug: + +# In crew.py, enable verbose mode +verbose=True # Shows agent reasoning + +# In main.py, add logging +import logging +logging.basicConfig(level=logging.DEBUG) + + +Tools Not Executing + +Check: + +✅ Tool is imported and registered in crew.py + +✅ Tool decorator is used: @tool("tool_name") + +✅ Tool docstring is clear and descriptive + +✅ Tool raises exceptions for errors (not return error JSON) + +Debug: + +# Test tool directly +from crew_demo.tools.weather_tool import get_weather +result = get_weather("London") +print(result) + + +Streaming Issues + +Check: + +✅ stream=True in Crew configuration + +✅ Events are yielded with \n at the end + +✅ Using StreamingResponse with media_type="application/x-ndjson" + +✅ CORS is configured if testing from browser + +Debug: + +# Test streaming with curl -N flag +curl -N -X POST http://localhost:8000/kickoff \ + -H "Content-Type: application/json" \ + -d '{"messages": [{"role": "user", "content": "test"}]}' + + +Duplicate Responses or Thought Process Visible + +Solution: This happens when the agent outputs multiple "Final Answer:" sections. The fix is to use streaming.result (already implemented in the provided main.py). + +Verify: + +✅ Using the main.py provided in this guide + +✅ Agent backstory includes instruction to hide thinking + +✅ streaming.result is used instead of parsing chunks + +Memory Issues + +Note: This guide uses stateless agents (memory=False) and passes conversation history manually. 
This ensures: + +✅ Consistent behavior across requests + +✅ No state pollution between users + +✅ Full control over conversation context + +If you need persistent memory: + +# crew.py +@agent +def assistant(self) -> Agent: + return Agent( + config=self.agents_config['assistant'], + tools=[self.search_tool, get_weather], + verbose=False, + memory=True # Enable memory + ) + + +Deployment + +Local Development + +# Run with auto-reload +uvicorn crew_demo.main:app --reload --host 0.0.0.0 --port 8000 + + +Production Deployment + +This CrewAI agent can be deployed to any platform that supports Python web applications. Ensure: + +Environment variables are configured in your hosting platform + +Endpoint is publicly accessible (HTTPS recommended) + +Server can handle concurrent requests (consider worker processes) + +Logging is configured for monitoring + +Generic Production Command: + +uvicorn crew_demo.main:app --host 0.0.0.0 --port 8000 --workers 4 + + +Important: Once deployed, update the CometChat agent's Deployment URL with your live endpoint (e.g., https://your-deployment-endpoint.com/kickoff). + +Security Recommendations + +Use HTTPS for production endpoints + +Validate incoming requests (optional: verify CometChat signature) + +Set rate limits to prevent abuse + +Monitor API usage and costs + +Keep API keys secure (never commit to git) + +# Example: Add authentication header validation +@app.post("/kickoff") +async def kickoff(request: KickoffRequest, authorization: str = Header(None)): + # Validate authorization token + if authorization != f"Bearer {os.getenv('EXPECTED_TOKEN')}": + raise HTTPException(status_code=401, detail="Unauthorized") + + # Process request... + + +Advanced Features + +Conversation Memory + +Add conversation memory to maintain context across sessions: + +# crew.py +from crewai.memory import ShortTermMemory, LongTermMemory + +@crew +def crew(self) -> Crew: + return Crew( + agents=self.agents, + tasks=self.tasks, + process=Process.sequential, + verbose=False, + stream=True, + memory=True, # Enable crew memory + short_term_memory=ShortTermMemory(), + long_term_memory=LongTermMemory() + ) + + +Custom Knowledge Base + +Place documents in the knowledge/ folder: + +knowledge/ +├── user_preference.txt # User-specific info +├── company_policies.md # Company guidelines +└── product_catalog.json # Product information + + +The agent can access these documents during execution. + +Structured Output + +Force structured responses: + +# tasks.yaml +conversation_task: + description: > + Respond to: {user_message} + expected_output: > + A JSON object with: + { + "response": "your answer here", + "confidence": 0.0-1.0, + "sources": ["source1", "source2"] + } + agent: assistant + output_json: true # Force JSON output + + +Best Practices + +1. Tool Design + +✅ Raise exceptions for errors (don't return error JSON) + +✅ Return structured data when possible + +✅ Include clear docstrings for LLM understanding + +✅ Keep tools focused (single responsibility) + +2. Agent Instructions + +✅ Be explicit about tool usage + +✅ Define success/failure criteria clearly + +✅ Instruct to hide internal reasoning + +✅ Provide examples in backstory + +3. Error Handling + +✅ Catch and handle API errors gracefully + +✅ Provide informative error messages + +✅ Log errors for debugging + +✅ Don't expose internal details to users + +4. Performance + +✅ Use streaming for better UX + +✅ Keep agents stateless when possible + +✅ Cache tool results when appropriate + +✅ Monitor API costs and usage + +5. 
Testing + +✅ Test each tool independently + +✅ Test with various conversation contexts + +✅ Test error scenarios + +✅ Monitor streaming performance + +Example Use Cases + +1. Customer Support Agent + +# agents.yaml +support_agent: + role: Customer Support Specialist + goal: Help customers with inquiries and issues + backstory: > + Expert at understanding customer needs and providing solutions. + Can search knowledge base, check order status, and escalate when needed. + + +Tools: knowledge search, order lookup, ticket creation + +2. Research Assistant + +# agents.yaml +researcher: + role: Research Assistant + goal: Find and synthesize information from multiple sources + backstory: > + Skilled at web research, fact-checking, and presenting findings clearly. + + +Tools: web search, PDF reader, citation formatter + +3. Data Analyst Agent + +# agents.yaml +analyst: + role: Data Analyst + goal: Analyze data and provide insights + backstory: > + Expert at data analysis, visualization, and explaining findings. + + +Tools: database query, chart generator, statistical analysis + +Conclusion + +By combining the CometChat Agentic Interface with CrewAI, developers can connect sophisticated AI agents with end users instantly and securely. CrewAI provides powerful agent orchestration, tool integration, and multi-agent collaboration, while CometChat handles the real-time communication layer, authentication, and message delivery. + +This architecture provides the flexibility of building agents with any LLM backend and custom tool integrations while maintaining the scalability and security of CometChat's platform. + +Additional Resources + +CrewAI Documentation: https://docs.crewai.com/ + +CometChat Dashboard: https://app.cometchat.com/ + +CometChat Docs: https://docs.cometchat.com/ + +OpenAI API: https://platform.openai.com/docs + +FastAPI Documentation: https://fastapi.tiangolo.com/ + +Support + +For issues or questions: + +CrewAI: https://github.com/crewAIInc/crewAI/issues + +CometChat: https://www.cometchat.com + +Version: 0.0.1 +CrewAI Version: 1.7.2 +Python Version: 3.13.2+ \ No newline at end of file From 981400721c74bded82352ac1ed9968b745963f92 Mon Sep 17 00:00:00 2001 From: Swapnil Godambe Date: Thu, 1 Jan 2026 17:32:00 +0530 Subject: [PATCH 03/15] updates folder structure --- .../crew-ai-agents-playbook.mdx | 0 .../crew-ai-backend-tools-agent.mdx | 0 .../{crew-ai => crew-ai-additional docs}/crew-ai-chef-agent.mdx | 0 .../crew-ai-coordinator-agent.mdx | 0 .../crew-ai-frontend-actions-agent.mdx | 0 .../crew-ai-group-chat-agent.mdx | 0 .../crew-ai-handoff-agent.mdx | 0 .../crew-ai-knowlege-agent-pdf.mdx | 0 .../crew-ai-orchestrator-agent.mdx | 0 ai-agents/{crew-ai => crew-ai-additional docs}/crew-ai.text | 0 10 files changed, 0 insertions(+), 0 deletions(-) rename ai-agents/{crew-ai => crew-ai-additional docs}/crew-ai-agents-playbook.mdx (100%) rename ai-agents/{crew-ai => crew-ai-additional docs}/crew-ai-backend-tools-agent.mdx (100%) rename ai-agents/{crew-ai => crew-ai-additional docs}/crew-ai-chef-agent.mdx (100%) rename ai-agents/{crew-ai => crew-ai-additional docs}/crew-ai-coordinator-agent.mdx (100%) rename ai-agents/{crew-ai => crew-ai-additional docs}/crew-ai-frontend-actions-agent.mdx (100%) rename ai-agents/{crew-ai => crew-ai-additional docs}/crew-ai-group-chat-agent.mdx (100%) rename ai-agents/{crew-ai => crew-ai-additional docs}/crew-ai-handoff-agent.mdx (100%) rename ai-agents/{crew-ai => crew-ai-additional docs}/crew-ai-knowlege-agent-pdf.mdx (100%) rename ai-agents/{crew-ai => 
crew-ai-additional docs}/crew-ai-orchestrator-agent.mdx (100%) rename ai-agents/{crew-ai => crew-ai-additional docs}/crew-ai.text (100%) diff --git a/ai-agents/crew-ai/crew-ai-agents-playbook.mdx b/ai-agents/crew-ai-additional docs/crew-ai-agents-playbook.mdx similarity index 100% rename from ai-agents/crew-ai/crew-ai-agents-playbook.mdx rename to ai-agents/crew-ai-additional docs/crew-ai-agents-playbook.mdx diff --git a/ai-agents/crew-ai/crew-ai-backend-tools-agent.mdx b/ai-agents/crew-ai-additional docs/crew-ai-backend-tools-agent.mdx similarity index 100% rename from ai-agents/crew-ai/crew-ai-backend-tools-agent.mdx rename to ai-agents/crew-ai-additional docs/crew-ai-backend-tools-agent.mdx diff --git a/ai-agents/crew-ai/crew-ai-chef-agent.mdx b/ai-agents/crew-ai-additional docs/crew-ai-chef-agent.mdx similarity index 100% rename from ai-agents/crew-ai/crew-ai-chef-agent.mdx rename to ai-agents/crew-ai-additional docs/crew-ai-chef-agent.mdx diff --git a/ai-agents/crew-ai/crew-ai-coordinator-agent.mdx b/ai-agents/crew-ai-additional docs/crew-ai-coordinator-agent.mdx similarity index 100% rename from ai-agents/crew-ai/crew-ai-coordinator-agent.mdx rename to ai-agents/crew-ai-additional docs/crew-ai-coordinator-agent.mdx diff --git a/ai-agents/crew-ai/crew-ai-frontend-actions-agent.mdx b/ai-agents/crew-ai-additional docs/crew-ai-frontend-actions-agent.mdx similarity index 100% rename from ai-agents/crew-ai/crew-ai-frontend-actions-agent.mdx rename to ai-agents/crew-ai-additional docs/crew-ai-frontend-actions-agent.mdx diff --git a/ai-agents/crew-ai/crew-ai-group-chat-agent.mdx b/ai-agents/crew-ai-additional docs/crew-ai-group-chat-agent.mdx similarity index 100% rename from ai-agents/crew-ai/crew-ai-group-chat-agent.mdx rename to ai-agents/crew-ai-additional docs/crew-ai-group-chat-agent.mdx diff --git a/ai-agents/crew-ai/crew-ai-handoff-agent.mdx b/ai-agents/crew-ai-additional docs/crew-ai-handoff-agent.mdx similarity index 100% rename from ai-agents/crew-ai/crew-ai-handoff-agent.mdx rename to ai-agents/crew-ai-additional docs/crew-ai-handoff-agent.mdx diff --git a/ai-agents/crew-ai/crew-ai-knowlege-agent-pdf.mdx b/ai-agents/crew-ai-additional docs/crew-ai-knowlege-agent-pdf.mdx similarity index 100% rename from ai-agents/crew-ai/crew-ai-knowlege-agent-pdf.mdx rename to ai-agents/crew-ai-additional docs/crew-ai-knowlege-agent-pdf.mdx diff --git a/ai-agents/crew-ai/crew-ai-orchestrator-agent.mdx b/ai-agents/crew-ai-additional docs/crew-ai-orchestrator-agent.mdx similarity index 100% rename from ai-agents/crew-ai/crew-ai-orchestrator-agent.mdx rename to ai-agents/crew-ai-additional docs/crew-ai-orchestrator-agent.mdx diff --git a/ai-agents/crew-ai/crew-ai.text b/ai-agents/crew-ai-additional docs/crew-ai.text similarity index 100% rename from ai-agents/crew-ai/crew-ai.text rename to ai-agents/crew-ai-additional docs/crew-ai.text From f2cc98de1883510f910dab59bc4f5a9929f85edd Mon Sep 17 00:00:00 2001 From: Swapnil Godambe Date: Thu, 1 Jan 2026 17:32:03 +0530 Subject: [PATCH 04/15] Update docs.json --- docs.json | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/docs.json b/docs.json index 8f9b6b5d..2e88fc17 100644 --- a/docs.json +++ b/docs.json @@ -4865,6 +4865,27 @@ } ] }, + { + "dropdown": "CrewAI", + "icon": "/images/icons/crew-ai.svg", + "pages": [ + "/ai-agents/crewai", + "/ai-agents/crewai-actions", + "/ai-agents/crewai-tools", + { + "group": "Guides", + "pages": [ + "/ai-agents/crewai-knowledge-agent" + ] + }, + { + "group": "Tutorials", + "pages": [ + 
"/ai-agents/crewai-product-hunt-agent" + ] + } + ] + }, { "dropdown": "Agno", "icon": "/images/icons/agno.svg", From ace1d41fe181a2d87545a26901280e50d699a2f3 Mon Sep 17 00:00:00 2001 From: Swapnil Godambe Date: Thu, 1 Jan 2026 17:34:31 +0530 Subject: [PATCH 05/15] Update docs.json --- docs.json | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs.json b/docs.json index 2e88fc17..9efaf76d 100644 --- a/docs.json +++ b/docs.json @@ -4869,19 +4869,19 @@ "dropdown": "CrewAI", "icon": "/images/icons/crew-ai.svg", "pages": [ - "/ai-agents/crewai", - "/ai-agents/crewai-actions", - "/ai-agents/crewai-tools", + "/ai-agents/crew-ai", + "/ai-agents/crew-ai-actions", + "/ai-agents/crew-ai-tools", { "group": "Guides", "pages": [ - "/ai-agents/crewai-knowledge-agent" + "/ai-agents/crew-ai-knowledge-agent" ] }, { "group": "Tutorials", "pages": [ - "/ai-agents/crewai-product-hunt-agent" + "/ai-agents/crew-ai-product-hunt-agent" ] } ] From bb88cd95caabfc0b8ff8e836a345c62df06b4b54 Mon Sep 17 00:00:00 2001 From: Swapnil Godambe Date: Thu, 1 Jan 2026 18:00:55 +0530 Subject: [PATCH 06/15] updates content for crew ai --- ai-agents/crew-ai-product-hunt-agent.mdx | 9 + ai-agents/crew-ai.mdx | 409 ++++++----------------- 2 files changed, 110 insertions(+), 308 deletions(-) diff --git a/ai-agents/crew-ai-product-hunt-agent.mdx b/ai-agents/crew-ai-product-hunt-agent.mdx index ca5f752f..92f07b2d 100644 --- a/ai-agents/crew-ai-product-hunt-agent.mdx +++ b/ai-agents/crew-ai-product-hunt-agent.mdx @@ -27,6 +27,15 @@ Give your chats Product Hunt superpowers: search launches, surface top posts, an --- +## How it works + +- **Agent** — configure tools for top lists, timeframe queries, search, and confetti; clamp limits and validate inputs. +- **Data** — Product Hunt GraphQL for live votes (needs `PRODUCTHUNT_API_TOKEN`) and Algolia search for discovery. +- **API** — expose FastAPI (or similar) routes for top/search plus a `/kickoff` chat endpoint that streams NDJSON (`text_delta`, `tool_*`, `text_end`, `done`). +- **Frontend** — when the confetti tool fires, your UI listens to `tool_result` and triggers the celebration handler. + +--- + ## Steps diff --git a/ai-agents/crew-ai.mdx b/ai-agents/crew-ai.mdx index 0887c5cd..82ea274f 100644 --- a/ai-agents/crew-ai.mdx +++ b/ai-agents/crew-ai.mdx @@ -1,26 +1,26 @@ --- title: "Create an AI Agent with CrewAI" sidebarTitle: "Create AI Agent" -description: "Wire a CrewAI agent to CometChat, stream messages over NDJSON, and ship it through UI Kit Builder or the Chat Widget." +description: "Connect a CrewAI agent to CometChat, customize it with UI Kit Builder, and ship it as React UI Kit code or a Chat Widget." --- import { Steps, Step, CardGroup, Card } from 'mintlify'; ## What you’ll build -- A **CrewAI** project (Python + FastAPI) that streams NDJSON events CometChat can consume. -- A simple **weather tool** plus room to add your own tools. -- A **CometChat AI Agent** entry pointing to your CrewAI `/kickoff` endpoint. -- Optional **UI exports** via **UI Kit Builder** or the **Chat Widget**. +- A **CrewAI** agent exposed via a public endpoint (e.g., FastAPI `/kickoff`) that streams NDJSON. +- The same agent **connected to CometChat** (Agent ID + Deployment URL). +- A **customized chat experience** using **UI Kit Builder**. +- An export to **React UI Kit code** _or_ **Chat Widget** for integration. 
--- ## Prerequisites -- Python **3.13.2+** with `uv` or `pip` -- CrewAI **v1.7.2** -- OpenAI API key (or Anthropic via environment) -- A CometChat app: **[Create App](https://app.cometchat.com/apps)** +- A CometChat account and an app: **[Create App](https://app.cometchat.com/apps)** +- A CrewAI agent endpoint (Python/FastAPI sample shown below) +- Python 3.13+ with your preferred package manager (`uv` or `pip`) +- OpenAI (or Anthropic) API key for the agent --- @@ -37,329 +37,122 @@ import { Steps, Step, CardGroup, Card } from 'mintlify'; --- -## Step 2 - Scaffold a CrewAI project +## Step 2 - Connect your CrewAI Agent -Use `uv` (fast Python package manager). Install it if needed: +Navigate to **AI Agent → Get Started** and then **AI Agents → Add Agent**. -```bash -curl -LsSf https://astral.sh/uv/install.sh | sh -# or -pip install uv -``` - -Create a CrewAI project: - -```bash -uv tool run crewai create crew crew_demo -cd crew_demo -``` - -Update `pyproject.toml` with required dependencies: - -```toml -[project] -name = "crew-demo" -version = "0.1.0" -description = "CrewAI agent integrated with CometChat" -requires-python = ">=3.13" -dependencies = [ - "crewai[tools]==1.7.2", - "fastapi>=0.115.6", - "uvicorn>=0.34.0", - "httpx>=0.28.1", - "python-dotenv>=1.0.1", - "openai>=1.13.3" -] - -[build-system] -requires = ["hatchling"] -build-backend = "hatchling.build" -``` - -Install everything: - -```bash -uv sync -``` - ---- - -## Step 3 - Configure environment + + + Select **CrewAI**. + + + Provide: +
    +
  - Name and optional Icon
  - (Optional) Greeting and Introductory Message
  - (Optional) Suggested messages
+
+ + Paste: +
    +
  - Agent ID — a unique handle that matches how you route traffic (e.g., `support`).
  - Deployment URL — the public HTTPS endpoint exposed by your CrewAI service (e.g., `/kickoff`).
  - (Optional) Headers — flat JSON auth headers your FastAPI deployment expects.
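
  For example, if your deployment checks a bearer token, Headers can be a flat JSON object (keep it flat; nested structures aren't supported):

  ```json
  { "Authorization": "Bearer YOUR_TOKEN" }
  ```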
+
+ + Click **Save**, then ensure the agent’s toggle is **ON** in **AI Agents** list. + +
-Create a `.env` in your project root: - -```bash -MODEL=gpt-4o-mini -OPENAI_API_KEY=your_openai_api_key_here -SERPER_API_KEY=your_serper_api_key_here # optional search -OPENWEATHER_API_KEY=your_openweather_api_key_here -PORT=8000 -HOST=0.0.0.0 -CREWAI_TRACING_ENABLED=false # disable for production -``` +> **Tip:** If you update your CrewAI agent later (prompts, tools, routing), you won’t need to re-connect it in CometChat—just keep the **Agent ID** and **Deployment URL** the same. --- -## Step 4 - Add a tool (weather) - -`src/crew_demo/tools/weather_tool.py` - -```python -import os, json -from datetime import datetime, timezone -import httpx -from crewai.tools import tool - - -@tool("get_weather") -def get_weather(location: str, unit: str = "fahrenheit") -> str: - """Return the current weather for a location as JSON.""" - api_key = os.getenv("OPENWEATHER_API_KEY") - if not api_key: - raise Exception("OPENWEATHER_API_KEY not set") +## Step 3 - Define Frontend Actions (Optional) - params = {"q": location, "appid": api_key, "units": "imperial" if unit.lower() == "fahrenheit" else "metric"} - response = httpx.get("http://api.openweathermap.org/data/2.5/weather", params=params, timeout=10.0) - response.raise_for_status() - data = response.json() - - return json.dumps({ - "location": data["name"], - "country": data["sys"]["country"], - "temperature": round(data["main"]["temp"], 1), - "conditions": data["weather"][0]["description"], - "humidity": data["main"]["humidity"], - "wind_speed": data["wind"]["speed"], - "unit": unit, - "timestamp": datetime.now(tz=timezone.utc).isoformat() - }) + + + Go to AI Agent → Actions and click Add to create a frontend action your agent can call (e.g., “Open Product,” “Start Demo,” “Book Slot”). + + + Include: +
    +
  - Display Name — shown to users (e.g., “Open Product Page”).
  - Execution Text — how the agent describes running it (e.g., “Opening product details for the user.”).
  - Name — a unique, code-friendly key (e.g., `open_product`).
  - Description — what the tool does and when to use it.
  - Parameters — JSON Schema describing inputs (the agent will fill these).
+
+ + Example parameters JSON: + +```json +{ + "type": "object", + "required": ["productId"], + "properties": { + "productId": { + "type": "string", + "description": "The internal product ID to open" + }, + "utm": { + "type": "string", + "description": "Optional tracking code" + } + } +} ``` + + + At runtime, listen for tool calls and execute them client‑side (e.g., route changes, modals, highlights). + +
--- -## Step 5 - Define the agent + task - -`src/crew_demo/config/agents.yaml` - -```yaml -assistant: - role: Friendly Conversational Assistant - goal: Help users with greetings and weather questions - backstory: > - Keep responses concise, use the get_weather tool for weather queries, - and only show the final answer (no thoughts or reasoning traces). -``` - -`src/crew_demo/config/tasks.yaml` +## Step 4 - Customize in UI Kit Builder + + From AI Agents click the variant (or Get Started) to enter UI Kit Builder. + Select Customize and Deploy. + Theme, layout, features; ensure the CrewAI agent is attached. + Use live preview to validate responses & any tool triggers. + -```yaml -conversation_task: - description: > - Respond to the user's message: {user_message} - Previous conversation context: {conversation_history} - Use the weather tool when relevant; otherwise answer directly. - expected_output: > - A helpful, natural response to the user's query - agent: assistant -``` + + } description="Embed / script" href="/ai-agents/chat-widget" horizontal /> + } href="https://www.cometchat.com/docs/ui-kit/react/ai-assistant-chat" horizontal>Pre Built UI Components + --- -## Step 6 - Wire up CrewAI - -`src/crew_demo/crew.py` - -```python -from typing import List -from crewai import Agent, Crew, Process, Task -from crewai.crew import EventListener -from crewai.agents.agent_builder.base_agent import BaseAgent -from crewai.project import CrewBase, agent, crew, task -from crewai_tools import SerperDevTool - -from crew_demo.tools.weather_tool import get_weather - -listener = EventListener() - - -@CrewBase -class CrewDemo(): - agents: List[BaseAgent] - tasks: List[Task] +## Step 5 - Export & Integrate - search_tool = SerperDevTool() - agents_config = "config/agents.yaml" - tasks_config = "config/tasks.yaml" +Choose how you’ll ship the experience (Widget or React UI Kit export). - @agent - def assistant(self) -> Agent: - return Agent( - config=self.agents_config["assistant"], # type: ignore[index] - tools=[self.search_tool, get_weather], - verbose=False, - memory=False, - ) - - @task - def conversation_task(self) -> Task: - return Task( - config=self.tasks_config["conversation_task"], # type: ignore[index] - markdown=True, - verbose=False, - ) - - @crew - def crew(self) -> Crew: - return Crew( - agents=self.agents, - tasks=self.tasks, - process=Process.sequential, - verbose=False, - stream=True, - ) -``` + + Pick Chat Widget (fastest) or export React UI Kit for code-level customization. + Open UI Kit Builder → Get Embedded Code → copy script + credentials. + Export the variant as code (UI Kit) if you need deep theming or custom logic. + Preview: the CrewAI agent should appear without extra config. 
+ --- -## Step 7 - Expose a FastAPI endpoint with NDJSON streaming +## Step 6 - Deploy & Secure (Reference) -`src/crew_demo/main.py` (excerpt) +If you need a starter CrewAI endpoint, you can reuse the `/kickoff` pattern from your CrewAI project: ```python -import json, os, warnings -from datetime import datetime -from uuid import uuid4 -from typing import List, Optional - -import uvicorn -from fastapi import FastAPI -from fastapi.responses import StreamingResponse -from pydantic import BaseModel -from crewai.types.streaming import StreamChunk, StreamChunkType, CrewStreamingOutput - -from crew_demo.crew import CrewDemo - -warnings.filterwarnings("ignore", category=SyntaxWarning, module="pysbd") -app = FastAPI() - - -class Message(BaseModel): - role: str - content: str - - -class KickoffRequest(BaseModel): - messages: List[Message] - threadId: Optional[str] = None - runId: Optional[str] = None - - -def stream_crew(inputs: dict): - message_id = str(uuid4()) - yield json.dumps({"type": "text_start", "message_id": message_id}) + "\n" - - streaming: CrewStreamingOutput = CrewDemo().crew().kickoff(inputs=inputs) - buffer, final_started = "", False - - for chunk in streaming: - if not isinstance(chunk, StreamChunk): - continue - - if chunk.chunk_type == StreamChunkType.TOOL_CALL and chunk.tool_call: - tool_call_id = str(uuid4()) - yield json.dumps({"type": "tool_call_start", "tool_call_id": tool_call_id, "name": chunk.tool_call.tool_name, "parent_message_id": message_id}) + "\n" - yield json.dumps({"type": "tool_call_args", "tool_call_id": tool_call_id, "args": json.loads(chunk.tool_call.arguments) if isinstance(chunk.tool_call.arguments, str) else chunk.tool_call.arguments}) + "\n" - yield json.dumps({"type": "tool_call_end", "tool_call_id": tool_call_id, "name": chunk.tool_call.tool_name}) + "\n" - if getattr(chunk.tool_call, "result", None): - yield json.dumps({"type": "tool_result", "tool_call_id": tool_call_id, "result": chunk.tool_call.result}) + "\n" - continue - - if chunk.chunk_type == StreamChunkType.TEXT: - buffer += chunk.content - if not final_started and "Final Answer:" in buffer: - final_started = True - final_chunk = buffer.split("Final Answer:")[-1].strip() - if final_chunk: - yield json.dumps({"type": "text_delta", "message_id": message_id, "content": final_chunk}) + "\n" - buffer = "" - elif final_started: - yield json.dumps({"type": "text_delta", "message_id": message_id, "content": chunk.content}) + "\n" - - if not final_started and buffer.strip(): - yield json.dumps({"type": "text_delta", "message_id": message_id, "content": buffer.strip()}) + "\n" - - yield json.dumps({"type": "text_end", "message_id": message_id}) + "\n" - yield json.dumps({"type": "done"}) + "\n" - - @app.post("/kickoff") async def kickoff(request: KickoffRequest): - inputs = { - "user_message": request.messages[-1].content, - "conversation_history": "\n".join([f"{m.role}: {m.content}" for m in request.messages[:-1]]), - "current_year": str(datetime.now().year), - } - return StreamingResponse(stream_crew(inputs), media_type="application/x-ndjson") - - -def run(): - uvicorn.run(app, host=os.getenv("HOST", "0.0.0.0"), port=int(os.getenv("PORT", "8000"))) - - -if __name__ == "__main__": - run() + inputs = { + "user_message": request.messages[-1].content, + "conversation_history": "\n".join([f"{m.role}: {m.content}" for m in request.messages[:-1]]), + } + return StreamingResponse(stream_crew(inputs), media_type="application/x-ndjson") ``` -Events emitted follow CometChat’s NDJSON shape: - -- Text: 
`text_start`, `text_delta`, `text_end` -- Tools: `tool_call_start`, `tool_call_args`, `tool_call_end`, `tool_result` -- Control: `done`, `error` - ---- - -## Step 8 - Run & test locally - -```bash -uv run crewai run # or: uv run uvicorn crew_demo.main:app --reload -curl -N -X POST http://localhost:8000/kickoff \ - -H "Content-Type: application/json" \ - -d '{"messages":[{"role":"user","content":"Hello!"}]}' -``` - -You should see NDJSON events streaming in the terminal. - ---- - -## Step 9 - Connect in CometChat - -In the CometChat Dashboard → **AI Agents → Add Agent**: - -- **Provider**: CrewAI -- **Name**: e.g., `CrewAI Weather Assistant` -- **Deployment URL**: `https://your-domain.com/kickoff` -- (Optional) **Headers**: flat JSON for auth (e.g., `{ "Authorization": "Bearer " }`) -- (Optional) Greeting, intro message, suggested prompts - -Save and toggle the agent **ON**. CometChat will stream user messages to `/kickoff` and relay NDJSON responses back to the client. - ---- - -## Step 10 - Extend your Crew - -- **Add tools**: decorate new Python functions with `@tool("name")` and register them in `crew.py`. -- **Switch LLMs**: set `ANTHROPIC_API_KEY` + `LLM_MODEL=claude-3-5-sonnet-20241022` (CrewAI auto-detects providers). -- **Multi-agent**: define multiple agents in `agents.yaml`, map tasks, and set `process=Process.sequential` or `Process.hierarchical`. -- **Structured output**: add `output_json: true` to a task to enforce JSON replies. -- **Production**: run `uvicorn crew_demo.main:app --host 0.0.0.0 --port 8000 --workers 4`, enable HTTPS, auth headers, CORS, and rate limits. - ---- - -## Step 11 - Deploy & ship UI - - - } description="Embed / script" href="/ai-agents/chat-widget" horizontal /> - } href="https://www.cometchat.com/docs/ui-kit/react/ai-assistant-chat" horizontal>Pre Built UI Components - - -> The CrewAI agent you configured is included automatically in exported variants—no extra client code needed for basic chat. +Deploy behind HTTPS, add auth headers, and keep API keys server-side. Once live, update your CometChat agent’s **Deployment URL** to point at the public endpoint and keep the toggle **ON**. From 510f89fd5869dd39da31650131ab7b8772c3c051 Mon Sep 17 00:00:00 2001 From: Swapnil Godambe Date: Wed, 7 Jan 2026 11:37:41 +0530 Subject: [PATCH 07/15] updates crew ai docs --- ai-agents/crew-ai-knowledge-agent.mdx | 198 ++++++++++------------- ai-agents/crew-ai-product-hunt-agent.mdx | 141 +++++++--------- 2 files changed, 145 insertions(+), 194 deletions(-) diff --git a/ai-agents/crew-ai-knowledge-agent.mdx b/ai-agents/crew-ai-knowledge-agent.mdx index c99f6bdc..47a51f6f 100644 --- a/ai-agents/crew-ai-knowledge-agent.mdx +++ b/ai-agents/crew-ai-knowledge-agent.mdx @@ -6,153 +6,133 @@ description: "Create a CrewAI knowledge agent that answers from your docs, strea import { Steps, Step } from 'mintlify'; -Imagine an agent that only answers when asked, pulls context from your docs, and responds with concise, cited answers right inside chat. +Based on the refreshed [`ai-agent-crew-ai-examples`](https://github.com/) codebase, here’s how to run the `knowledge_agent` FastAPI service, ingest docs, and stream answers into CometChat. *** ## What you’ll build -- A **CrewAI** agent scoped to documentation questions. -- A lightweight **ingest + retrieve** flow that reads files from `knowledge/`. -- A **FastAPI `/kickoff`** endpoint that streams NDJSON events CometChat consumes. -- A **CometChat AI Agent** entry pointing at your deployment. 
+- A **CrewAI** agent that grounds replies in your ingested docs (per namespace). +- A **FastAPI** service with ingest/search/generate endpoints plus a `/stream` SSE. +- **CometChat AI Agent** wiring that consumes newline-delimited JSON chunks (`text_delta`, `text_done`, `done`). *** ## Prerequisites -- CrewAI project (see [Create an AI Agent with CrewAI](/ai-agents/crew-ai)) -- Python 3.13.2+, `uv` or `pip` -- `OPENAI_API_KEY` in `.env` -- A CometChat app +- Python 3.10+ with `pip` (or `uv`) +- `OPENAI_API_KEY` (optionally `OPENAI_BASE_URL`, `KNOWLEDGE_OPENAI_MODEL`, `KNOWLEDGE_EMBEDDING_MODEL`) +- A CometChat app + AI Agent entry *** -## How it works - -- **Ingest**: drop markdown/text/PDF summaries into `knowledge//`. -- **Retrieve**: a CrewAI tool scans that folder, scores snippets, and returns top matches with filenames for citations. -- **Answer**: the agent replies only when explicitly mentioned (`@agent`) and always cites sources. -- **Stream**: `/kickoff` emits NDJSON events (`text_*`, `tool_*`, `done`) that CometChat renders in real time. - -*** - -## Steps +## Run the updated sample - - Create knowledge/default (or any namespace) and add markdown/text files. You can sync files from your CMS or build a simple CLI to keep this folder updated. - - - Implement a CrewAI tool that loads files from the namespace, extracts top snippets, and returns JSON with content, source, and optional score. + + In ai-agent-crew-ai-examples/: +
  - `python3 -m venv .venv`
  - `source .venv/bin/activate`
  - `pip install -e .`
  - `uvicorn knowledge_agent.main:app --host 0.0.0.0 --port 8000 --reload`

  Env supports `.env` at the repo root or inside `knowledge_agent/.env`.
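
  A minimal `knowledge_agent/.env` could look like this (placeholder values):

  ```bash
  OPENAI_API_KEY=sk-your-key
  KNOWLEDGE_OPENAI_MODEL=gpt-4o-mini
  KNOWLEDGE_EMBEDDING_MODEL=text-embedding-3-small
  ```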
- - Configure the agent to respond only when mentioned and to always cite sources. Keep the backstory strict about using the retriever tool first. + + OPENAI_API_KEY is required. Optional: OPENAI_BASE_URL, KNOWLEDGE_OPENAI_MODEL (default gpt-4o-mini), KNOWLEDGE_EMBEDDING_MODEL (default text-embedding-3-small). - - Reuse the FastAPI NDJSON stream from crew-ai.mdx. Pass namespace via tool params if you support multiple doc sets. - - - Dashboard → AI Agents → Provider = CrewAI, Agent ID = knowledge, Deployment URL = your public /kickoff. + + Ingested files land in knowledge_agent/data/knowledge/<namespace>/ and embeddings persist to knowledge_agent/data/chroma/<namespace>/. Duplicate hashes are skipped automatically.
*** -## Sample retriever tool - -`src/crew_demo/tools/docs_retriever.py` - -```python -import json -from pathlib import Path -from typing import List, Dict -from crewai.tools import tool - - -def _load_documents(namespace: str) -> List[Dict[str, str]]: - base = Path(__file__).parent.parent.parent / "knowledge" / namespace - docs = [] - for path in base.rglob("*.md"): - try: - docs.append({"source": path.name, "content": path.read_text(encoding="utf-8")}) - except Exception: - continue - return docs - - -@tool("search_docs") -def search_docs(query: str, namespace: str = "default", limit: int = 3) -> str: - """Return top matching snippets from knowledge/.""" - docs = _load_documents(namespace) - scored = [] - for doc in docs: - if query.lower() in doc["content"].lower(): - scored.append({"source": doc["source"], "excerpt": doc["content"][:800]}) - top = scored[:limit] if scored else [] - return json.dumps({"matches": top, "namespace": namespace}) +## API surface (FastAPI) + +- `POST /api/tools/ingest` — accept JSON or `multipart/form-data` with `sources` (text/markdown/url) and optional file uploads; returns `saved`, `skipped`, `errors`. +- `POST /api/tools/searchDocs` — semantic search via Chroma; accepts `namespace`, `query`, `max_results`. +- `POST /api/agents/knowledge/generate` — single, non-streaming completion. +- `POST /stream` — newline-delimited JSON over SSE (`text_delta`, `text_done`, `done`) ready for CometChat BYOA. + +### Ingest examples + +```bash +curl -X POST http://localhost:8000/api/tools/ingest \ + -H "Content-Type: application/json" \ + -d '{ + "namespace": "default", + "sources": [ + { "type": "url", "value": "https://docs.crewai.com/" }, + { "type": "markdown", "title": "Notes", "value": "# CrewAI Rocks" } + ] + }' ``` -*** +Multipart uploads are also supported: -## Agent configuration +```bash +curl -X POST http://localhost:8000/api/tools/ingest \ + -H "Accept: application/json" \ + -F "namespace=default" \ + -F "sources=[{\"type\":\"text\",\"value\":\"Hello\"}]" \ + -F "files=@/path/to/file.pdf" +``` -`src/crew_demo/config/agents.yaml` +### Search + answer -```yaml -knowledge: - role: Knowledge Retrieval Specialist - goal: Answer questions using retrieved docs only - backstory: > - Respond only when mentioned (e.g., @agent). - Always call the search_docs tool first, then compose a concise answer with a Sources list. - Never reveal internal reasoning or tool output verbatim. +```bash +curl -X POST http://localhost:8000/api/tools/searchDocs \ + -H "Content-Type: application/json" \ + -d '{"namespace":"default","query":"CrewAI agent lifecycle","max_results":4}' ``` -`src/crew_demo/config/tasks.yaml` - -```yaml -knowledge_task: - description: > - Use search_docs to find relevant context for: {user_message} - Namespace: {namespace} - expected_output: > - A short answer plus a "Sources:" list. - agent: knowledge +```bash +curl -N http://localhost:8000/stream \ + -H "Content-Type: application/json" \ + -d '{ + "thread_id": "thread_1", + "run_id": "run_001", + "messages": [ + { "role": "user", "content": "Summarize the CrewAI agent lifecycle." 
} + ] + }' ``` -Register the tool in `crew.py`: +Streaming payload shape: -```python -from crew_demo.tools.docs_retriever import search_docs - -@agent -def knowledge(self) -> Agent: - return Agent( - config=self.agents_config["knowledge"], # type: ignore[index] - tools=[search_docs], - verbose=False, - memory=False, - ) +```json +{"type":"text_delta","content":"...","thread_id":"...","run_id":"..."} +{"type":"text_done","thread_id":"...","run_id":"..."} +{"type":"done","thread_id":"...","run_id":"..."} ``` *** -## CometChat setup +## Crew internals (for reference) - - Open CometChat Dashboard → AI Agents. - Provider = CrewAI, Agent ID = knowledge, Deployment URL = your /kickoff. - Add greeting/intro and suggested prompts like “@agent What’s our refund policy?” - Save and toggle the agent ON. - +`knowledge_agent/knowledge_manager.py` builds a search tool per namespace, wired into a CrewAI agent: -*** +```python +search_tool = self._create_search_tool(normalised) +agent = Agent( + role="Knowledge Librarian", + goal="Answer user questions with relevant citations from the knowledge base.", + tools=[search_tool], + llm=model, +) +task = Task( + description="Use search_knowledge_base before answering.\nConversation: {conversation}\nLatest: {question}", + expected_output="A concise, cited answer grounded in ingested docs.", + agent=agent, +) +crew = Crew(agents=[agent], tasks=[task], process=Process.sequential) +``` -## Tips +*** -- Add basic file filters (size/type) before ingesting to keep retrieval fast. -- If you need embeddings/vector search, swap the retriever implementation; CometChat only expects the NDJSON stream, not the retrieval method. -- Use `output_json: true` on the task if you want fully structured answers. +## Wire it to CometChat ---- +- Dashboard → AI Agents → **Provider: CrewAI** → **Agent ID** (e.g., `knowledge`) → **Deployment URL** = your public `/stream`. +- The SSE stream is newline-delimited JSON; CometChat AG-UI adapters can parse `text_delta`/`text_done` to render partials and stop on `done`. +- Use namespaces to keep customer/workspace data separate; pass `namespace` in the payload or inside `tool_params.namespace`. +- Keep secrets server-side; add auth headers on the FastAPI route if needed. diff --git a/ai-agents/crew-ai-product-hunt-agent.mdx b/ai-agents/crew-ai-product-hunt-agent.mdx index 92f07b2d..c3a5c5e8 100644 --- a/ai-agents/crew-ai-product-hunt-agent.mdx +++ b/ai-agents/crew-ai-product-hunt-agent.mdx @@ -6,122 +6,93 @@ description: "Create a CrewAI agent that fetches Product Hunt posts, answers lau import { Steps, Step } from 'mintlify'; -Give your chats Product Hunt superpowers: search launches, surface top posts, and celebrate wins with a UI action. +Refreshed for the latest [`ai-agent-crew-ai-examples`](https://github.com/) codebase: run the `product_hunt_agent` FastAPI service, wire it to CometChat, and stream SSE updates (with optional confetti actions). --- ## What you’ll build -- A CrewAI agent with tools to **get top posts**, **search**, and **trigger a frontend action** (confetti). -- A FastAPI `/kickoff` endpoint streaming NDJSON for CometChat. -- Optional static page or widget that maps the confetti tool to a UI handler. +- A CrewAI agent with tools to **get top posts**, **search**, **timeframes**, and **trigger confetti**. +- A FastAPI `/stream` endpoint emitting newline-delimited JSON (`text_delta`, `text_done`, `done`). +- CometChat AI Agent wiring that consumes those SSE chunks; your UI listens for the confetti payload. 
--- ## Prerequisites -- Node or Python hosting for your CrewAI service -- `OPENAI_API_KEY` for chat -- `PRODUCTHUNT_API_TOKEN` (GraphQL) for live data +- Python 3.10+ with `pip` +- `OPENAI_API_KEY` (optionally `OPENAI_BASE_URL`, `PRODUCT_OPENAI_MODEL`) +- Optional: `PRODUCTHUNT_API_TOKEN` for live GraphQL data (empty lists when missing) - CometChat app + AI Agent entry --- -## How it works - -- **Agent** — configure tools for top lists, timeframe queries, search, and confetti; clamp limits and validate inputs. -- **Data** — Product Hunt GraphQL for live votes (needs `PRODUCTHUNT_API_TOKEN`) and Algolia search for discovery. -- **API** — expose FastAPI (or similar) routes for top/search plus a `/kickoff` chat endpoint that streams NDJSON (`text_delta`, `tool_*`, `text_end`, `done`). -- **Frontend** — when the confetti tool fires, your UI listens to `tool_result` and triggers the celebration handler. - ---- - -## Steps +## Run the updated sample - - Create tools for get_top_products, search_products, and get_top_by_timeframe. Require PRODUCTHUNT_API_TOKEN and validate inputs. - - - Include a simple action tool (see frontend actions guide) to trigger celebration in the UI. + + In ai-agent-crew-ai-examples/: +
  - `python3 -m venv .venv`
  - `source .venv/bin/activate`
  - `pip install -e .`
  - `uvicorn product_hunt_agent.main:app --host 0.0.0.0 --port 8001 --reload`
- - Explain when to use each tool, how to summarize results in tables, and when to celebrate. - - - Reuse the NDJSON streaming pattern from crew-ai.mdx. - - - Provider = CrewAI, Agent ID = product_hunt, Deployment URL = your public endpoint. + + Required: OPENAI_API_KEY. Optional: PRODUCTHUNT_API_TOKEN (GraphQL), PRODUCTHUNT_DEFAULT_TIMEZONE (default America/New_York).
---- - -## Sample tool (top products) - -`src/crew_demo/tools/product_hunt.py` +*** -```python -import os, json, httpx -from crewai.tools import tool - - -PRODUCT_HUNT_API = "https://api.producthunt.com/v2/api/graphql" -HEADERS = lambda: {"Authorization": f"Bearer {os.getenv('PRODUCTHUNT_API_TOKEN')}"} - - -@tool("get_top_products") -def get_top_products(limit: int = 3) -> str: - """Fetch top Product Hunt posts by votes.""" - if not os.getenv("PRODUCTHUNT_API_TOKEN"): - raise Exception("PRODUCTHUNT_API_TOKEN not set") - - query = """ - query TopProducts($limit: Int!) { - posts(order: VOTES, first: $limit) { - edges { node { name tagline votesCount url } } - } - } - """ - resp = httpx.post(PRODUCT_HUNT_API, headers=HEADERS(), json={"query": query, "variables": {"limit": limit}}, timeout=10.0) - resp.raise_for_status() - data = resp.json()["data"]["posts"]["edges"] - posts = [{"name": edge["node"]["name"], "tagline": edge["node"]["tagline"], "votes": edge["node"]["votesCount"], "url": edge["node"]["url"]} for edge in data] - return json.dumps({"posts": posts}) -``` +## API surface (FastAPI) -Register the tool and combine it with a confetti action in `crew.py`. +- `GET /api/top` — top posts by votes (`limit` 1–10). +- `GET /api/top-week` — rolling window (default 7 days) with `limit` and `days`. +- `GET /api/top-range` — timeframe queries (`timeframe`, `tz`, `limit`); supports `"today"`, `"yesterday"`, `"last_week"`, `"last_month"`, or ISO dates. +- `GET /api/search` — Algolia search (`q`, `limit`). +- `POST /api/chat` — non-streaming CrewAI answer. +- `POST /stream` — SSE stream (`text_delta`, `text_done`, `done`) ready for CometChat. ---- +### Streaming example -## Agent configuration +```bash +curl -N http://localhost:8001/stream \ + -H "Content-Type: application/json" \ + -d '{ + "messages": [ + { "role": "user", "content": "What were the top launches last week?" } + ] + }' +``` -`src/crew_demo/config/agents.yaml` +Streaming payload shape: -```yaml -product_hunt: - role: Product Hunt Launch Assistant - goal: Fetch Product Hunt data and celebrate launches - backstory: > - Use get_top_products for rankings and a search tool for queries. - Present answers as bullet points or compact tables. Fire confetti on wins when asked. +```json +{"type":"text_delta","content":"...","thread_id":"...","run_id":"..."} +{"type":"text_done","thread_id":"...","run_id":"..."} +{"type":"done","thread_id":"...","run_id":"..."} ``` ---- +*** -## Frontend handling +## Crew internals (for reference) -- Map `tool_result` for the confetti action to your UI handler (Widget/React UI Kit). -- Clamp limits (e.g., max 10 posts) in the tool to avoid huge responses. -- Never expose API tokens to the client—keep calls server-side. +Key tools in `product_hunt_agent/agent_builder.py`: ---- +```python +@tool("getTopProducts") # votes-ranked, clamps limit 1-10 +@tool("getTopProductsThisWeek") # rolling-week window, clamps days/limit +@tool("getTopProductsByTimeframe") # "today", "yesterday", "last_week", ISO, ranges +@tool("searchProducts") # Algolia search (no token needed) +@tool("triggerConfetti") # returns payload: colors, counts, spread, etc. +``` -## Troubleshooting +All tools run server-side; if `PRODUCTHUNT_API_TOKEN` is missing, top queries return empty arrays but still respond cleanly. -- Empty posts: confirm `PRODUCTHUNT_API_TOKEN` and check rate limits. -- Confetti not firing: ensure the tool name matches your client handler mapping. -- Slow responses: cache popular queries or reduce `limit`. 
+*** ---- +## Wire it to CometChat + +- Dashboard → AI Agents → **Provider: CrewAI** → **Agent ID** (e.g., `product_hunt`) → **Deployment URL** = your public `/stream`. +- Listen for `text_delta`/`text_done` to render streaming text; stop on `done`. +- When `triggerConfetti` returns, map the payload to your UI handler (Widget/React UI Kit). Keep API tokens server-side. From 96478b6e29408a6c90ee53f3db121d9d7c0ce69c Mon Sep 17 00:00:00 2001 From: Swapnil Godambe Date: Tue, 13 Jan 2026 20:03:04 +0530 Subject: [PATCH 08/15] deletes additional docs --- .../crew-ai-agents-playbook.mdx | 73 - .../crew-ai-backend-tools-agent.mdx | 122 -- .../crew-ai-chef-agent.mdx | 101 -- .../crew-ai-coordinator-agent.mdx | 121 -- .../crew-ai-frontend-actions-agent.mdx | 119 -- .../crew-ai-group-chat-agent.mdx | 93 -- .../crew-ai-handoff-agent.mdx | 106 -- .../crew-ai-knowlege-agent-pdf.mdx | 88 -- .../crew-ai-orchestrator-agent.mdx | 81 -- .../crew-ai-additional docs/crew-ai.text | 1246 ----------------- 10 files changed, 2150 deletions(-) delete mode 100644 ai-agents/crew-ai-additional docs/crew-ai-agents-playbook.mdx delete mode 100644 ai-agents/crew-ai-additional docs/crew-ai-backend-tools-agent.mdx delete mode 100644 ai-agents/crew-ai-additional docs/crew-ai-chef-agent.mdx delete mode 100644 ai-agents/crew-ai-additional docs/crew-ai-coordinator-agent.mdx delete mode 100644 ai-agents/crew-ai-additional docs/crew-ai-frontend-actions-agent.mdx delete mode 100644 ai-agents/crew-ai-additional docs/crew-ai-group-chat-agent.mdx delete mode 100644 ai-agents/crew-ai-additional docs/crew-ai-handoff-agent.mdx delete mode 100644 ai-agents/crew-ai-additional docs/crew-ai-knowlege-agent-pdf.mdx delete mode 100644 ai-agents/crew-ai-additional docs/crew-ai-orchestrator-agent.mdx delete mode 100644 ai-agents/crew-ai-additional docs/crew-ai.text diff --git a/ai-agents/crew-ai-additional docs/crew-ai-agents-playbook.mdx b/ai-agents/crew-ai-additional docs/crew-ai-agents-playbook.mdx deleted file mode 100644 index dd6708a6..00000000 --- a/ai-agents/crew-ai-additional docs/crew-ai-agents-playbook.mdx +++ /dev/null @@ -1,73 +0,0 @@ ---- -title: "CrewAI Agents Playbook" -description: "Guides for six core CrewAI agents: Knowledge Retrieval, Frontend Action, Backend Tool, Handoff, Relay/Coordinator, and Group Chat." ---- - -# Knowledge Retrieval Agent - -Build a retrieval-first agent that answers questions from your docs with citations. - - - Build & integrate the knowledge retrieval agent. - - ---- - -# Frontend Actions Agent - -Emit structured UI actions (navigate, openModal, toggleTheme) that your frontend executes safely. - - - Return safe UI actions from the agent. - - ---- - -# Backend Tools Agent - -Let the agent call backend tools or APIs (weather, CRM, ticketing) and stream results back. - - - Wire backend tools & external APIs. - - ---- - -# Human Handoff Agent - -Escalate gracefully when a human is needed by invoking a `handoff` tool with target + reason. - - - Escalate to humans with context. - - ---- - -# Multi-agent Orchestration (Coordinator) Agent - -Fan out to multiple sub-agents (billing, support, research) and return a consolidated answer. - - - Coordinate across specialized agents. - - ---- - -# Group Chat Agent - -An agent that lives in group chats and only responds when explicitly mentioned (e.g., `@agent`). - - - Mention-aware agent for group rooms. - - ---- - -## Next Steps - -* Combine these agents inside one CrewAI project. 
-* Deploy to your preferred host and connect them via **CometChat Dashboard → AI Agents**. -* Layer guardrails, retrieval, and workflows as needed. -* Share the playbook with your team so they can copy-paste working agent setups. - ---- diff --git a/ai-agents/crew-ai-additional docs/crew-ai-backend-tools-agent.mdx b/ai-agents/crew-ai-additional docs/crew-ai-backend-tools-agent.mdx deleted file mode 100644 index 94324bf6..00000000 --- a/ai-agents/crew-ai-additional docs/crew-ai-backend-tools-agent.mdx +++ /dev/null @@ -1,122 +0,0 @@ ---- -title: "Backend Tools Agent with CrewAI" -sidebarTitle: "Backend Tools Agent" -description: "Let a CrewAI agent call backend APIs (CRM, weather, ticketing) and stream results to CometChat via NDJSON." ---- - -import { Steps, Step } from 'mintlify'; - -Put your APIs behind controlled tools so the agent can fetch data or perform actions while CometChat streams results live. - -*** - -## What you’ll build - -- A CrewAI agent configured with one or more **backend tools**. -- Tool functions that validate inputs, call external APIs, and raise exceptions on failure. -- A `/kickoff` endpoint that surfaces tool call events to CometChat. - -*** - -## Prerequisites - -- CrewAI project from [crew-ai.mdx](/ai-agents/crew-ai) -- API keys for any services you call (store them in `.env`) - -*** - -## Steps - - - - Keep each tool focused (single responsibility) and add clear docstrings so the LLM knows when to call them. - - - Check for required fields and raise exceptions for bad data—don’t return error strings. - - - Add tools to the agent in crew.py and set process=Process.sequential or another flow as needed. - - - Reuse the NDJSON stream from crew-ai.mdx so CometChat can render tool progress. - - - -*** - -## Sample tool: fetch deals from a CRM - -`src/crew_demo/tools/get_deals.py` - -```python -import os, httpx, json -from crewai.tools import tool - - -@tool("get_recent_deals") -def get_recent_deals(limit: int = 5) -> str: - """Fetch recent deals from the CRM.""" - api_key = os.getenv("CRM_API_KEY") - base_url = os.getenv("CRM_BASE_URL", "https://api.example-crm.com") - if not api_key: - raise Exception("CRM_API_KEY not set") - - resp = httpx.get(f"{base_url}/deals", params={"limit": limit}, headers={"Authorization": f"Bearer {api_key}"}, timeout=10.0) - resp.raise_for_status() - deals = resp.json().get("deals", []) - return json.dumps({"deals": deals[:limit]}) -``` - -Register in `crew.py`: - -```python -from crew_demo.tools.get_deals import get_recent_deals - -@agent -def backend(self) -> Agent: - return Agent( - config=self.agents_config["backend"], # type: ignore[index] - tools=[get_recent_deals], - verbose=False, - memory=False, - ) -``` - -*** - -## Agent/task configuration - -`src/crew_demo/config/agents.yaml` - -```yaml -backend: - role: Backend Integrations Agent - goal: Call approved APIs and summarize results - backstory: > - Always validate inputs, call the correct tool, and summarize results clearly. - Never expose raw errors—raise exceptions and let the server handle them. -``` - -`src/crew_demo/config/tasks.yaml` - -```yaml -backend_task: - description: > - Use the appropriate tool to answer: {user_message} - expected_output: > - A short summary plus any key fields (totals, status, links). 
- agent: backend -``` - -*** - -## Connect to CometChat - -- Provider: **CrewAI** -- Agent ID: `backend` -- Deployment URL: your public `/kickoff` -- Optional headers: `{ "Authorization": "Bearer " }` - -Ensure your FastAPI service returns NDJSON with tool events so users can see when an API call is running. - ---- diff --git a/ai-agents/crew-ai-additional docs/crew-ai-chef-agent.mdx b/ai-agents/crew-ai-additional docs/crew-ai-chef-agent.mdx deleted file mode 100644 index 94407416..00000000 --- a/ai-agents/crew-ai-additional docs/crew-ai-chef-agent.mdx +++ /dev/null @@ -1,101 +0,0 @@ ---- -title: "Chef Agent with CrewAI" -sidebarTitle: "Chef Agent" -description: "Build a CrewAI recipe assistant that plans meals, checks pantry items, and can trigger frontend actions (e.g., show shopping list)." ---- - -import { Steps, Step } from 'mintlify'; - -Delight users with a culinary copilot that suggests recipes, checks pantry items via tools, and returns structured shopping lists. - -*** - -## What you’ll build - -- A CrewAI agent with cooking-focused instructions. -- Tools for pantry lookup and recipe generation. -- Optional frontend action that displays a shopping list in your UI. - -*** - -## Prerequisites - -- CrewAI project ([crew-ai.mdx](/ai-agents/crew-ai)) -- Any ingredient/recipe API keys if you use a third-party source - -*** - -## Steps - - - - Provide a tool to check ingredient availability (local DB or API). Return structured data for the agent. - - - Let the LLM craft recipes using the pantry results. Keep responses concise with steps + ingredient list. - - - Emit a UI action (see frontend actions guide) so your app can show a list or start a checkout flow. - - - -*** - -## Sample tools - -`src/crew_demo/tools/pantry.py` - -```python -import json -from crewai.tools import tool - -PANTRY = {"eggs": 6, "milk": 1, "flour": 500} # grams or units - - -@tool("check_pantry") -def check_pantry(items: str) -> str: - """Return which requested items exist in the pantry.""" - requested = [i.strip().lower() for i in items.split(",") if i.strip()] - found = {item: PANTRY.get(item, 0) for item in requested} - return json.dumps({"items": found}) -``` - -`src/crew_demo/tools/shopping_list.py` - -```python -import json -from crewai.tools import tool - - -@tool("show_shopping_list") -def show_shopping_list(items: str) -> str: - """Return a shopping list action for the frontend.""" - return json.dumps({ - "action": "show_shopping_list", - "parameters": {"items": [i.strip() for i in items.split(",") if i.strip()]} - }) -``` - -*** - -## Agent configuration - -`src/crew_demo/config/agents.yaml` - -```yaml -chef: - role: Culinary Assistant - goal: Propose recipes users can cook with available ingredients - backstory: > - First check pantry items with check_pantry. - Suggest a recipe with steps and a short shopping list if needed. - If asked, trigger show_shopping_list with the missing items. -``` - ---- - -## CometChat setup - -Provider: **CrewAI**, Agent ID: `chef`, Deployment URL: `/kickoff`. Provide suggested prompts like “What can I make with eggs, milk, and flour?”. 
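Before wiring the agent into CometChat, it helps to exercise the tools directly. A small smoke-test sketch, assuming the demo `PANTRY` contents above and that CrewAI's `@tool`-decorated objects expose `.run()`:

```python
# Smoke test for the pantry tools above (a sketch; assumes the demo PANTRY
# contents and CrewAI's BaseTool.run() interface on @tool-decorated objects).
import json

from crew_demo.tools.pantry import check_pantry
from crew_demo.tools.shopping_list import show_shopping_list


def test_pantry_roundtrip() -> None:
    pantry = json.loads(check_pantry.run(items="eggs, butter"))
    assert pantry["items"]["eggs"] == 6    # stocked in the demo pantry
    assert pantry["items"]["butter"] == 0  # missing, so it belongs on the list

    missing = [name for name, qty in pantry["items"].items() if qty == 0]
    action = json.loads(show_shopping_list.run(items=", ".join(missing)))
    assert action["action"] == "show_shopping_list"
    assert action["parameters"]["items"] == ["butter"]
```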
- ---- diff --git a/ai-agents/crew-ai-additional docs/crew-ai-coordinator-agent.mdx b/ai-agents/crew-ai-additional docs/crew-ai-coordinator-agent.mdx deleted file mode 100644 index 1a7f2a10..00000000 --- a/ai-agents/crew-ai-additional docs/crew-ai-coordinator-agent.mdx +++ /dev/null @@ -1,121 +0,0 @@ ---- -title: "Coordinator / Relay Agent with CrewAI" -sidebarTitle: "Coordinator Agent" -description: "Create a CrewAI coordinator that fans out to specialized agents and merges their answers before streaming to CometChat." ---- - -import { Steps, Step } from 'mintlify'; - -Handle complex questions by routing to the right specialist (billing, support, research) and returning a single, well-structured answer. - -*** - -## What you’ll build - -- Multiple CrewAI agents (specialists) plus a **coordinator** agent. -- A workflow that sequences or hierarchically calls those agents. -- NDJSON streaming so CometChat shows progress as the coordinator works. - -*** - -## Prerequisites - -- CrewAI project ([crew-ai.mdx](/ai-agents/crew-ai)) -- Agent configs for each specialist (billing/support/research/etc.) - -*** - -## Steps - - - - Add agents like billing, support, and research with narrow goals and tools. - - - The coordinator inspects the user question, decides which specialists to call, and merges their outputs. - - - Use Process.sequential for simple fan-out or Process.hierarchical if you want dynamic branching. - - - Keep the existing `/kickoff` NDJSON stream; tool calls from sub-agents will appear in CometChat. - - - -*** - -## Example configuration - -`src/crew_demo/config/agents.yaml` - -```yaml -billing: - role: Billing Specialist - goal: Handle invoices and refunds - backstory: > - Use billing tools only. Escalate if payment methods need human approval. - -support: - role: Support Specialist - goal: Troubleshoot product issues - backstory: > - Ask clarifying questions and suggest next steps using available tools. - -coordinator: - role: Relay Coordinator - goal: Route the request to the right specialist and return a concise summary - backstory: > - Decide which specialist to invoke. Combine their findings into one clear response. -``` - -`src/crew_demo/crew.py` (excerpt) - -```python -from crew_demo.tools.get_deals import get_recent_deals - -@agent -def billing(self) -> Agent: - return Agent(config=self.agents_config["billing"], tools=[get_recent_deals], verbose=False) - -@agent -def support(self) -> Agent: - return Agent(config=self.agents_config["support"], tools=[], verbose=False) - -@agent -def coordinator(self) -> Agent: - return Agent(config=self.agents_config["coordinator"], tools=[], verbose=False) - -@crew -def crew(self) -> Crew: - return Crew( - agents=[self.coordinator(), self.billing(), self.support()], - tasks=self.tasks, - process=Process.sequential, # swap to Process.hierarchical for dynamic routing - verbose=False, - stream=True, - ) -``` - -`src/crew_demo/config/tasks.yaml` - -```yaml -coordination_task: - description: > - Decide which specialist should handle: {user_message} - Share a merged answer back to the user. - expected_output: > - A concise response summarizing each specialist's findings. - agent: coordinator -``` - -*** - -## CometChat setup - -- Provider: **CrewAI** -- Agent ID: `coordinator` -- Deployment URL: `/kickoff` - -Use suggested prompts like “Ask billing if invoice #123 is paid and summarize it” to exercise multi-agent flows. 
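To sanity-check routing before exposing the endpoint, kick the crew off locally with a multi-part prompt. A sketch, assuming the `CrewDemo` class and the `{user_message}`/`{conversation_history}` task inputs from the base guide:

```python
# Local routing check (sketch; assumes the CrewDemo class from the base guide
# with stream=True on the Crew, so kickoff returns an iterable of chunks).
from crewai.types.streaming import StreamChunk, StreamChunkType

from crew_demo.crew import CrewDemo


def main() -> None:
    inputs = {
        "user_message": "Ask billing if invoice #123 is paid and summarize it",
        "conversation_history": "",  # stateless: pass prior turns here if any
    }
    for chunk in CrewDemo().crew().kickoff(inputs=inputs):
        if isinstance(chunk, StreamChunk) and chunk.chunk_type == StreamChunkType.TEXT:
            print(chunk.content, end="", flush=True)
    print()


if __name__ == "__main__":
    main()
```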
- ---- diff --git a/ai-agents/crew-ai-additional docs/crew-ai-frontend-actions-agent.mdx b/ai-agents/crew-ai-additional docs/crew-ai-frontend-actions-agent.mdx deleted file mode 100644 index 4147d01e..00000000 --- a/ai-agents/crew-ai-additional docs/crew-ai-frontend-actions-agent.mdx +++ /dev/null @@ -1,119 +0,0 @@ ---- -title: "Frontend Actions Agent with CrewAI" -sidebarTitle: "Frontend Actions Agent" -description: "Let a CrewAI agent emit safe UI actions (navigation, modals, confetti) that your frontend executes when running inside CometChat." ---- - -import { Steps, Step } from 'mintlify'; - -Give your chat experience UI superpowers: the agent replies with structured actions your frontend can run (e.g., open a product, fire confetti, toggle a theme). - -*** - -## What you’ll build - -- A CrewAI agent that returns **JSON actions** instead of calling backend APIs. -- A tool definition that keeps action payloads predictable. -- A `/kickoff` stream that includes `tool_*` events so the client can map actions. -- A CometChat AI Agent entry that routes traffic to this CrewAI service. - -*** - -## Prerequisites - -- CrewAI project + FastAPI stream from [crew-ai.mdx](/ai-agents/crew-ai) -- Frontend capable of handling action payloads (UI Kit export or custom UI) -- Optional: `canvas-confetti` or your own action handlers - -*** - -## Steps - - - - Decide on a small set of actions your UI will accept (e.g., open_product, show_confetti, toggle_theme). - - - Return structured JSON with action name and parameters. Keep the schema narrow to avoid unsafe payloads. - - - In the backstory, explain when to trigger each action and to keep text responses short when actions are returned. - - - In your widget or UI Kit export, map `tool_call_*` + `tool_result` events to actual UI functions (e.g., fire confetti, navigate). - - - -*** - -## Sample action tool - -`src/crew_demo/tools/confetti_action.py` - -```python -import json -from crewai.tools import tool - - -@tool("trigger_confetti") -def trigger_confetti(celebration: str = "default") -> str: - """Return a confetti action payload for the frontend.""" - return json.dumps({ - "action": "show_confetti", - "parameters": { - "preset": celebration, - "particleCount": 150, - "spread": 70 - } - }) -``` - -Register in `crew.py` and add to the agent’s tool list alongside any other actions. - -*** - -## Agent configuration - -`src/crew_demo/config/agents.yaml` - -```yaml -ui_assistant: - role: Frontend Action Agent - goal: Trigger approved UI actions and keep responses concise - backstory: > - When a user asks to celebrate, call trigger_confetti and include a short celebratory message. - When a user asks to open a product, return an action with name=open_product and include productId. - Never invent action names outside the approved list. Keep explanations brief. -``` - -`src/crew_demo/config/tasks.yaml` - -```yaml -ui_task: - description: > - Decide whether to run a UI action for: {user_message} - expected_output: > - Either a short text response or a tool-triggered action. - agent: ui_assistant -``` - -*** - -## Client-side handling - -- Listen for `tool_call_start`/`tool_call_args` to show “running action…” states. -- Parse `tool_result` JSON and map the `action` name to a handler. -- Keep handlers idempotent and validate parameters on the client before executing. - -*** - -## Connect to CometChat - -Use the same `/kickoff` endpoint as your CrewAI project. 
In Dashboard → AI Agents: - -- Provider: **CrewAI** -- Agent ID: `ui_assistant` -- Deployment URL: your `/kickoff` -- (Optional) Suggested prompts: “Launch confetti”, “Open the product page for sku-123” - ---- diff --git a/ai-agents/crew-ai-additional docs/crew-ai-group-chat-agent.mdx b/ai-agents/crew-ai-additional docs/crew-ai-group-chat-agent.mdx deleted file mode 100644 index 1c75f007..00000000 --- a/ai-agents/crew-ai-additional docs/crew-ai-group-chat-agent.mdx +++ /dev/null @@ -1,93 +0,0 @@ ---- -title: "Group Chat Agent with CrewAI" -sidebarTitle: "Group Chat Agent" -description: "Create a CrewAI agent that lives in group rooms, responds only when mentioned, and streams NDJSON to CometChat." ---- - -import { Steps, Step } from 'mintlify'; - -Keep group chats clean: the agent replies only when explicitly tagged, includes mentions in its response, and keeps context short. - -*** - -## What you’ll build - -- A CrewAI agent that checks for mentions (e.g., `@agent`). -- Backstory rules to ignore chatter unless addressed. -- A `/kickoff` stream that returns concise answers and optional tool calls. - -*** - -## Prerequisites - -- CrewAI project and `/kickoff` endpoint from [crew-ai.mdx](/ai-agents/crew-ai) -- Client ability to pass conversation history and mention metadata - -*** - -## Steps - - - - Decide on a handle (e.g., @agent or @support) that users will type in group rooms. - - - Pass a boolean or mention list into the task input so the agent knows when it is addressed. - - - In the backstory, limit reply length and avoid re-quoting the entire thread. - - - Point your AI Agent to the CrewAI `/kickoff` URL and set a friendly display name/icon. - - - -*** - -## Agent configuration - -`src/crew_demo/config/agents.yaml` - -```yaml -group_agent: - role: Group Chat Assistant - goal: Reply only when mentioned and keep messages concise - backstory: > - Respond only if the latest message contains "@agent" (case-insensitive). - Keep replies under 4 sentences. If not mentioned, politely stay silent. -``` - -`src/crew_demo/config/tasks.yaml` - -```yaml -group_task: - description: > - If mentioned: respond to {user_message}. Otherwise return an empty reply. - Conversation: {conversation_history} - expected_output: > - A short answer or an empty string when not mentioned. - agent: group_agent -``` - -In your FastAPI kickoff, include a flag: - -```python -inputs = { - "user_message": request.messages[-1].content, - "conversation_history": "\n".join([...]), - "mentioned": "@agent" in request.messages[-1].content.lower(), -} -``` - -The task can check `mentioned` in its prompt and return an empty string to suppress replies. - -*** - -## CometChat setup - -- Provider: **CrewAI** -- Agent ID: `group_agent` -- Deployment URL: `/kickoff` -- Suggested prompts: “@agent what is our refund policy?” - ---- diff --git a/ai-agents/crew-ai-additional docs/crew-ai-handoff-agent.mdx b/ai-agents/crew-ai-additional docs/crew-ai-handoff-agent.mdx deleted file mode 100644 index 659be52b..00000000 --- a/ai-agents/crew-ai-additional docs/crew-ai-handoff-agent.mdx +++ /dev/null @@ -1,106 +0,0 @@ ---- -title: "Human Handoff Agent with CrewAI" -sidebarTitle: "Handoff Agent" -description: "Build a CrewAI agent that knows its limits and escalates to humans with a clear reason and target." ---- - -import { Steps, Step } from 'mintlify'; - -Teach the agent to say “I need a human” and emit a structured handoff payload CometChat can route to your team. 
- -*** - -## What you’ll build - -- A CrewAI tool that emits a `handoff` payload (reason + target + context). -- Agent instructions that decide when to escalate. -- NDJSON streaming so CometChat can display escalation status in real time. - -*** - -## Prerequisites - -- CrewAI project + `/kickoff` streaming endpoint ([crew-ai.mdx](/ai-agents/crew-ai)) -- A human support flow in your product (ticketing, live agent queue, etc.) - -*** - -## Steps - - - - Decide where to route: support, sales, billing, or a specific user ID. - - - Return a JSON payload with target, reason, and optional priority. Raise exceptions if required fields are missing. - - - In the backstory, describe clear thresholds for escalation (e.g., compliance, billing disputes, missing permissions). - - - When CometChat receives a `tool_result` for `handoff`, trigger your own UI/notification flow. - - - -*** - -## Sample handoff tool - -`src/crew_demo/tools/handoff.py` - -```python -import json -from crewai.tools import tool - - -@tool("handoff") -def handoff(target: str, reason: str, priority: str = "normal") -> str: - """Escalate to a human with routing details.""" - if not target or not reason: - raise Exception("target and reason are required for handoff") - return json.dumps({ - "action": "handoff", - "target": target, - "reason": reason, - "priority": priority - }) -``` - -Add to your agent’s tool list in `crew.py`. - -*** - -## Agent configuration - -`src/crew_demo/config/agents.yaml` - -```yaml -handoff: - role: Escalation Specialist - goal: Detect when a human is needed and route correctly - backstory: > - If the request involves account cancellation, payments, or legal topics, call the handoff tool. - Explain briefly why escalation is required. Keep user-facing text short and polite. -``` - -`src/crew_demo/config/tasks.yaml` - -```yaml -handoff_task: - description: > - Decide whether to answer or escalate: {user_message} - expected_output: > - If escalation is required, trigger the handoff tool with target + reason. - agent: handoff -``` - -*** - -## CometChat integration - -- Provider: **CrewAI** -- Agent ID: `handoff` -- Deployment URL: your `/kickoff` -- Client: map the `handoff` tool payload to your routing layer (open a ticket, page on-call, or DM a user). - ---- diff --git a/ai-agents/crew-ai-additional docs/crew-ai-knowlege-agent-pdf.mdx b/ai-agents/crew-ai-additional docs/crew-ai-knowlege-agent-pdf.mdx deleted file mode 100644 index e10be2be..00000000 --- a/ai-agents/crew-ai-additional docs/crew-ai-knowlege-agent-pdf.mdx +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: "PDF Knowledge Agent with CrewAI" -sidebarTitle: "PDF Knowledge Agent" -description: "Ingest PDFs, extract text, and let a CrewAI agent answer questions with citations while streaming to CometChat." ---- - -import { Steps, Step } from 'mintlify'; - -Turn PDF handbooks into an on-demand assistant. The agent ingests PDFs, retrieves relevant chunks, and answers with citations. - -*** - -## What you’ll build - -- A PDF ingestion routine that extracts text into `knowledge//`. -- CrewAI tools to ingest and retrieve PDF context. -- An agent that cites sources and only responds when invoked. - -*** - -## Prerequisites - -- CrewAI project from [crew-ai.mdx](/ai-agents/crew-ai) -- `pypdf` or `pdfplumber` installed for extraction (`uv add pypdf`) - -*** - -## Steps - - - - Convert PDFs into markdown/text and save under knowledge//. Store filenames for citations. - - - ingest_pdf: accept a file path/URL, extract text, save to knowledge folder. 
- retrieve_pdf_context: search extracted text for a query and return top chunks with source names. - - - Force a retrieval call before answering and append a “Sources” list. - - - Use the same NDJSON pipeline; include the namespace or PDF title in tool args for clarity. - - - -*** - -## Sample retrieval tool - -`src/crew_demo/tools/retrieve_pdf_context.py` - -```python -import json -from pathlib import Path -from crewai.tools import tool - - -@tool("retrieve_pdf_context") -def retrieve_pdf_context(query: str, namespace: str = "pdf") -> str: - """Retrieve text snippets from extracted PDFs.""" - base = Path(__file__).parent.parent.parent / "knowledge" / namespace - matches = [] - for path in base.glob("*.txt"): - content = path.read_text(encoding="utf-8") - if query.lower() in content.lower(): - matches.append({"source": path.name, "excerpt": content[:800]}) - return json.dumps({"matches": matches[:3], "namespace": namespace}) -``` - -Pair this with an ingestion script that turns PDFs into `.txt` files in the same namespace. - -*** - -## Agent prompt essentials - -- Always call `retrieve_pdf_context` first. -- Answer only from retrieved excerpts; if nothing is found, say so. -- Cite sources as `Sources: file-one.txt, file-two.txt`. - -*** - -## CometChat setup - -Provider: **CrewAI**, Agent ID: `pdf_agent`, Deployment URL: `/kickoff`. - -Suggested prompts: “@pdf_agent What does the onboarding guide say about PTO?”. - ---- diff --git a/ai-agents/crew-ai-additional docs/crew-ai-orchestrator-agent.mdx b/ai-agents/crew-ai-additional docs/crew-ai-orchestrator-agent.mdx deleted file mode 100644 index 83bceda3..00000000 --- a/ai-agents/crew-ai-additional docs/crew-ai-orchestrator-agent.mdx +++ /dev/null @@ -1,81 +0,0 @@ ---- -title: "Orchestrator Agent with CrewAI" -sidebarTitle: "Orchestrator Agent" -description: "Use CrewAI to orchestrate multiple agents and tools, enforce steps, and return a single streamed answer to CometChat." ---- - -import { Steps, Step } from 'mintlify'; - -When requests require multiple skills—research, calculations, approvals—an orchestrator coordinates the flow and keeps users updated through streaming events. - -*** - -## What you’ll build - -- An orchestrator agent that plans steps, invokes sub-agents/tools, and composes the final reply. -- Tasks that enforce ordering (e.g., research → summarize → verify). -- NDJSON streaming so users see progress. - -*** - -## Prerequisites - -- CrewAI multi-agent setup from [crew-ai.mdx](/ai-agents/crew-ai) -- At least two specialized agents or tools to orchestrate - -*** - -## Steps - - - - Define the stages your orchestrator must follow (e.g., gather context → call tool → summarize). - - - In the backstory, describe exactly when to call each sub-agent/tool and how to merge outputs. - - - Keep stream=True on the Crew so CometChat receives progress as steps complete. - - - Try multi-part requests to ensure the orchestrator doesn’t skip steps. - - - -*** - -## Example backstory - -`src/crew_demo/config/agents.yaml` - -```yaml -orchestrator: - role: Orchestration Lead - goal: Plan and execute the right sequence of tools/agents - backstory: > - Always outline a short plan, then execute tools or call specialists as needed. - Merge their findings into one concise answer. Do not expose raw plan text to the user. -``` - -`src/crew_demo/config/tasks.yaml` - -```yaml -orchestrator_task: - description: > - Plan the best sequence to solve: {user_message} - expected_output: > - A clear answer plus any key data. Mention which tools were used. 
- agent: orchestrator -``` - -Use `Process.hierarchical` if you want the orchestrator to decide dynamically which sub-agent to call next. - -*** - -## CometChat setup - -Provider: **CrewAI**, Agent ID: `orchestrator`, Deployment URL: `/kickoff`. - -Add suggested prompts like “Find our three latest deals, then draft a summary for the customer” to exercise orchestration. - ---- diff --git a/ai-agents/crew-ai-additional docs/crew-ai.text b/ai-agents/crew-ai-additional docs/crew-ai.text deleted file mode 100644 index a0ae1f7e..00000000 --- a/ai-agents/crew-ai-additional docs/crew-ai.text +++ /dev/null @@ -1,1246 +0,0 @@ -Integrating CrewAI agent with CometChat - -CrewAI CometChat Integration Guide - -The CometChat Agentic Interface enables developers and product owners to connect their AI agents, built on platforms like Agno, AG2, Vercel AI SDK, Mastra, Rasa, CrewAI, or custom frameworks, directly with their end users through CometChat. - -This approach follows a Bring Your Own Agent (BYOA) model where you build and host your own agent, and CometChat provides the secure communication layer, authentication, and real-time delivery between users and agents. - -This guide explains how to integrate a CrewAI agent with CometChat using Python and FastAPI. - -Overview - -The CometChat Agentic Platform operates over HTTP with streaming support. You expose an HTTP endpoint where CometChat sends user messages and receives responses from your agent. Your CrewAI agent handles the AI logic and tool execution, while your FastAPI server manages the HTTP layer and newline-delimited JSON (NDJSON) streaming. - -Once configured, your CrewAI Agent will be able to: - -Receive and respond to CometChat user messages - -Process inputs using CrewAI's agent framework - -Execute tools and functions (you can define any number of tools) - -Stream responses back in real-time to CometChat clients - -Prerequisites - -Before starting, ensure you have: - -Python 3.13.2 or higher installed - -A working knowledge of CrewAI (v1.7.2) - -A CometChat account (available at https://app.cometchat.com) - -An OpenAI API key - -(Optional) A Serper API key for web search capabilities - -(Optional) An OpenWeatherMap API key for weather information - -Installation - -This guide uses uv for fast, reliable Python package management. 
If you don't have uv installed: - -# Install uv -curl -LsSf https://astral.sh/uv/install.sh | sh - -# Or using pip -pip install uv - - -Create a New CrewAI Project - -# Create a new CrewAI crew -uv tool run crewai create crew crew_demo - -# Navigate to the project -cd crew_demo - - -Install Dependencies - -Add the required dependencies to your pyproject.toml: - -[project] -name = "crew-demo" -version = "0.1.0" -description = "CrewAI agent integrated with CometChat" -authors = [{name = "Your Name", email = "your.email@example.com"}] -requires-python = ">=3.13" -dependencies = [ - "crewai[tools]==1.7.2", - "fastapi>=0.115.6", - "uvicorn>=0.34.0", - "httpx>=0.28.1", - "python-dotenv>=1.0.1", - "openai>= 1.13.3" -] - -[build-system] -requires = ["hatchling"] -build-backend = "hatchling.build" - -[tool.uv] -dev-dependencies = [] - - -Install all dependencies: - -uv sync - - -Project Structure - -After creation and setup, your project structure should look like this: - -crew-demo/ -│ -├── knowledge/ -│ └── user_preference.txt # Optional: Agent knowledge base -│ -├── src/ -│ └── crew_demo/ -│ ├── __init__.py -│ ├── config/ -│ │ ├── agents.yaml # Agent definitions -│ │ └── tasks.yaml # Task definitions -│ ├── crew.py # Crew setup and configuration -│ ├── main.py # FastAPI server with streaming -│ └── tools/ -│ ├── __init__.py -│ └── weather_tool.py # Custom weather tool -│ -├── tests/ # Test files -├── .env # Environment variables -├── pyproject.toml # Project dependencies -└── README.md -└── uv.lock - - -Step 1: Configure Environment Variables - -Create a .env file in the project root: - -# OpenAI Configuration -MODEL=gpt-4o-mini -OPENAI_API_KEY=your_openai_api_key_here - -# Tool API Keys -SERPER_API_KEY=your_serper_api_key_here -OPENWEATHER_API_KEY=your_openweather_api_key_here - -# Server Configuration -PORT=8000 -HOST=0.0.0.0 - -# CrewAI Configuration (disable tracing for production) -CREWAI_TRACING_ENABLED=false - - -Important Notes: - -Get your OpenAI API key from https://platform.openai.com/api-keys - -Get Serper API key (for web search) from https://serper.dev/ - -Get OpenWeatherMap API key from https://openweathermap.org/api - -Step 2: Create the Weather Tool - -Create src/crew_demo/tools/weather_tool.py: - -"""Weather tool for fetching current weather information""" - -import os -import json -from datetime import datetime, timezone - -import httpx -from crewai.tools import tool - - -@tool("get_weather") -def get_weather(location: str, unit: str = "fahrenheit") -> str: - """Get the current weather for a location - - Args: - location: The city name, e.g. San Francisco, Mumbai, London - unit: The temperature unit (celsius or fahrenheit) - - Returns: - Weather information as a JSON string with temperature, conditions, etc. 
- - Raises: - Exception: If the API call fails or location is not found - """ - - api_key = os.getenv("OPENWEATHER_API_KEY") - if not api_key: - raise Exception("OPENWEATHER_API_KEY environment variable not set") - - url = "http://api.openweathermap.org/data/2.5/weather" - params = { - "q": location, - "appid": api_key, - "units": "imperial" if unit.lower() == "fahrenheit" else "metric" - } - - try: - response = httpx.get(url, params=params, timeout=10.0) - response.raise_for_status() - - data = response.json() - - # Extract relevant information - result = { - "location": data["name"], - "country": data["sys"]["country"], - "temperature": round(data["main"]["temp"], 1), - "feels_like": round(data["main"]["feels_like"], 1), - "unit": unit, - "conditions": data["weather"][0]["description"], - "humidity": data["main"]["humidity"], - "wind_speed": data["wind"]["speed"], - "timestamp": datetime.now(tz=timezone.utc).isoformat() - } - - # Return clean JSON - NO error field when successful - return json.dumps(result) - - except httpx.HTTPStatusError as e: - if e.response.status_code == 404: - raise Exception( - f"Location '{location}' not found. " - "Please check the spelling and try again." - ) - else: - raise Exception( - f"Weather API returned error {e.response.status_code}. " - "Please try again later." - ) - - except httpx.TimeoutException: - raise Exception("Weather service timed out. Please try again.") - - except httpx.RequestError as e: - raise Exception(f"Could not connect to weather service: {str(e)}") - - except Exception as e: - raise Exception(f"Unexpected error fetching weather: {str(e)}") - - -Key Design Decisions: - -Raises exceptions for errors: Makes it clear to the agent when something fails - -Returns clean JSON on success: No "error" field needed - -Type hints and docstrings: Helps the LLM understand how to use the tool - -Step 3: Define Your Agent - -Create src/crew_demo/config/agents.yaml: - -assistant: - role: > - Friendly Conversational Assistant - goal: > - Help users with questions, greetings, and provide weather information when requested - backstory: > - You're a helpful, friendly assistant who responds naturally to greetings - and casual conversation. - - When users ask about weather, you MUST use the get_weather tool. After the tool - returns results, check if there is an "error" field in the response: - - If there's an "error" field, tell the user the weather could not be retrieved - - If there's NO "error" field, the tool succeeded - use the weather data to give - the user a helpful response with temperature, conditions, and other details - - IMPORTANT: When a tool returns data successfully (no error field), that means it - worked! Don't apologize or say you couldn't get the data. Use the data that was - returned. - - For general questions, you can check your training data or search the web if needed. - - Always respond with ONLY your final answer - never show your thinking process, - thoughts, or tool observations to the user. The user should only see your final - response. - - -Important: The backstory explicitly tells the agent: - -How to interpret tool results (no error = success) - -When to use which tool - -To hide reasoning and show only the final answer - -Step 4: Define Your Task - -Create src/crew_demo/config/tasks.yaml: - -conversation_task: - description: > - Respond to the user's message: {user_message} - - Previous conversation context: {conversation_history} - - If it's a greeting, respond warmly. If asking about weather, use the weather tool. 
- For other questions, use search if needed. - expected_output: > - A helpful, natural response to the user's query - agent: assistant - - -Task Parameters: - -{user_message}: The current user message - -{conversation_history}: Previous messages for context - -Step 5: Set Up the Crew - -Create src/crew_demo/crew.py: - -"""CrewAI - Crew Configuration""" - -from typing import List - -from crewai_tools import SerperDevTool -from crewai.crew import EventListener -from crewai import Agent, Crew, Process, Task -from crewai.agents.agent_builder.base_agent import BaseAgent -from crewai.events.utils.console_formatter import ConsoleFormatter -from crewai.project import CrewBase, agent, crew, task - -# Import your custom weather tool -from crew_demo.tools.weather_tool import get_weather - -# Configure event listener for optional logging -my_listener = EventListener() -my_listener.formatter = ConsoleFormatter(verbose=False) - - -@CrewBase -class CrewDemo(): - """CrewDemo crew""" - - agents: List[BaseAgent] - tasks: List[Task] - - # Initialize tools - search_tool = SerperDevTool() - - # Config file paths - agents_config = "config/agents.yaml" - tasks_config = "config/tasks.yaml" - - @agent - def assistant(self) -> Agent: - """Conversational Assistant Agent""" - return Agent( - config=self.agents_config['assistant'], # type: ignore[index] - tools=[self.search_tool, get_weather], - verbose=False, # Set to True for debugging - memory=False # Stateless - we pass conversation history manually - ) - - @task - def conversation_task(self) -> Task: - """Conversation Task""" - return Task( - config=self.tasks_config['conversation_task'], # type: ignore[index] - markdown=True, - verbose=False, - ) - - @crew - def crew(self) -> Crew: - """Creates the CrewDemo crew""" - return Crew( - agents=self.agents, - tasks=self.tasks, - process=Process.sequential, - verbose=False, - stream=True, # Enable streaming - output_log_file=False, - tracing=False, - ) - - -Key Configuration: - -memory=False: We handle conversation history manually - -stream=True: Enables real-time streaming - -verbose=False: Hides internal logging (set True for debugging) - -Step 6: Create the FastAPI Server with Streaming - -Create src/crew_demo/main.py: - -"""Crew entry point with improved filtering""" - -import os -import json -import warnings -from typing import List, Optional -from datetime import datetime -from uuid import uuid4 - -from crewai.types.streaming import StreamChunk, StreamChunkType, CrewStreamingOutput - -import uvicorn -from pydantic import BaseModel - -from fastapi import FastAPI -from fastapi.responses import StreamingResponse - -from crew_demo.crew import CrewDemo - -warnings.filterwarnings("ignore", category=SyntaxWarning, module="pysbd") - -app = FastAPI() - -class Message(BaseModel): - """message fields""" - role: str - content: str - -class KickoffRequest(BaseModel): - """Incoming request model""" - messages: List[Message] - threadId: Optional[str] = None - runId: Optional[str] = None - -def stream_crew(inputs: dict): - """Generator that yields AG-UI compatible JSON chunks with robust filtering.""" - - message_id = str(uuid4()) - - yield json.dumps({ - "type": "text_start", - "message_id": message_id - }) + "\n" - - streaming: CrewStreamingOutput = CrewDemo().crew().kickoff(inputs=inputs) - - # State tracking - buffer = "" - final_answer_started = False - final_answer_content = "" - thought_keywords = ["Thought:", "Action:", "Action Input:", "Observation:"] - - for chunk in streaming: - if not isinstance(chunk, StreamChunk): 
- continue - - # Handle tool calls - if chunk.chunk_type == StreamChunkType.TOOL_CALL and chunk.tool_call: - tool_call_id = str(uuid4()) - - yield json.dumps({ - "type": "tool_call_start", - "tool_call_id": tool_call_id, - "name": chunk.tool_call.tool_name, - "parent_message_id": message_id - }) + "\n" - - try: - args = json.loads( - chunk.tool_call.arguments - ) if isinstance(chunk.tool_call.arguments, str) else chunk.tool_call.arguments - - yield json.dumps({ - "type": "tool_call_args", - "tool_call_id": tool_call_id, - "args": args - }) + "\n" - except json.JSONDecodeError: - pass - - yield json.dumps({ - "type": "tool_call_end", - "tool_call_id": tool_call_id, - "name": chunk.tool_call.tool_name - }) + "\n" - - if hasattr(chunk.tool_call, 'result') and chunk.tool_call.result: - yield json.dumps({ - "type": "tool_result", - "tool_call_id": tool_call_id, - "result": chunk.tool_call.result - }) + "\n" - - continue - - # Handle text chunks - if chunk.chunk_type == StreamChunkType.TEXT: - buffer += chunk.content - - # Look for "Final Answer:" marker - if not final_answer_started and "Final Answer:" in buffer: - final_answer_started = True - - # Extract everything after "Final Answer:" - parts = buffer.split("Final Answer:")[-1].strip() - if len(parts) > 1: - final_answer_content = parts[1].strip() - - # Stream what we have so far (if any) - if final_answer_content: - yield json.dumps({ - "type": "text_delta", - "message_id": message_id, - "content": final_answer_content - }) + "\n" - - buffer = "" # Clear buffer - - elif final_answer_started: - # We're in final answer mode - # Check if we're hitting a new thought process - if any(keyword in chunk.content for keyword in thought_keywords): - # Stop streaming - we've hit the thought process again - final_answer_started = False - break - - # Stream this chunk - yield json.dumps({ - "type": "text_delta", - "message_id": message_id, - "content": chunk.content - }) + "\n" - - # sometimes, the "Final Answer:" text is not present, so stream all - if not final_answer_started: - yield json.dumps({ - "type": "text_delta", - "message_id": message_id, - "content": buffer.strip() - }) + "\n" - - yield json.dumps({ - "type": "text_end", - "message_id": message_id - }) + "\n" - - yield json.dumps({"type": "done"}) + "\n" - -@app.post("/kickoff") -async def kickoff(request: KickoffRequest): - """This kicks off the request""" - # print(json.dumps(request.model_dump(), indent=4)) - - inputs = { - 'user_message': request.messages[-1].content, - 'conversation_history': "\n".join( - [f"{m.role}: {m.content}" for m in request.messages[:-1]] - ), - 'current_year': str(datetime.now().year) - } - - return StreamingResponse( - stream_crew(inputs), - media_type="application/x-ndjson" - ) - -def run(): - """Start the app""" - port = int(os.getenv("PORT", "8000")) - host = os.getenv("HOST", "0.0.0.0") - - uvicorn.run(app, host=host, port=port) - -if __name__ == "__main__": - run() - - - -Streaming Architecture: - -Tool events are emitted in real-time during the loop. - -Text response is emitted as soon as the chunks arrive. - -Clean separation between tool execution and final answer. - -No thought process leaked - only the final result is shown. 
- -Step 7: Test Locally - -Run the Server - -# Activate virtual environment (if using uv) -source .venv/bin/activate # On Unix/macOS -# or -.venv\Scripts\activate # On Windows - -# Run the server -crewai run - - -The server will start at http://localhost:8000 - -Test with cURL - -Test 1: Simple Greeting - -curl -X POST http://localhost:8000/kickoff \ - -H "Content-Type: application/json" \ - -d '{ - "thread_id": "thread_123", - "run_id": "run_456", - "messages": [ - { - "role": "user", - "content": "Hello!" - } - ] - }' - - -Expected Response: - -{"type": "text_start", "message_id": "abc-123"} -{"type": "text_delta", "message_id": "abc-123", "content": "Hello! How can I assist you today?"} -{"type": "text_end", "message_id": "abc-123"} -{"type": "done"} - - -Test 2: Weather Query - -curl -X POST http://localhost:8000/kickoff \ - -H "Content-Type: application/json" \ - -d '{ - "thread_id": "thread_123", - "run_id": "run_789", - "messages": [ - { - "role": "user", - "content": "What is the weather in Mumbai?" - } - ] - }' - - -Expected Response: - -{"type": "text_start", "message_id": "def-456"} -{"type": "tool_call_start", "tool_call_id": "tool_789", "name": "get_weather", "parent_message_id": "def-456"} -{"type": "tool_call_args", "tool_call_id": "tool_789", "args": {"location": "Mumbai", "unit": "fahrenheit"}} -{"type": "tool_call_end", "tool_call_id": "tool_789", "name": "get_weather"} -{"type": "tool_result", "tool_call_id": "tool_789", "result": "{\"location\": \"Mumbai\", \"temperature\": 84.2, ...}"} -{"type": "text_delta", "message_id": "def-456", "content": "The current weather in Mumbai is 84.2°F..."} -{"type": "text_end", "message_id": "def-456"} -{"type": "done"} - - -Test 3: Conversation Context - -curl -X POST http://localhost:8000/kickoff \ - -H "Content-Type: application/json" \ - -d '{ - "thread_id": "thread_123", - "run_id": "run_999", - "messages": [ - { - "role": "user", - "content": "What is the weather in Paris?" - }, - { - "role": "assistant", - "content": "The weather in Paris is 15°C and cloudy." - }, - { - "role": "user", - "content": "How about London?" - } - ] - }' - - -The agent will understand "London" refers to weather based on conversation context. - -Step 8: Configure CometChat Agent - -In the CometChat Dashboard: - -Go to your App. - -Navigate to: AI Agents → BYO Agents - -Click "Add Agent" to create a new CrewAI agent or edit the existing CrewAI agent, if already available. - -Fill in the following fields: - -Name: CrewAI Weather Assistant - -Deployment URL: https://yourdomain.com/kickoff - -Greeting: "Hello! I can help you with weather information and answer your questions." - -Headers (optional): Valid JSON with authorization tokens - -Example Header Configuration: - -{ - "Authorization": "Bearer YOUR_TOKEN" -} - - -Important: The JSON must be flat (width = 1). No nested structures. - -Save the configuration - -Understanding the Event Flow - -Request Flow - -User sends message in CometChat app - -CometChat forwards to your /kickoff endpoint. Please make sure the agent is publicly reachable. 
- -{ - "thread_id": "unique_thread_id", - "run_id": "unique_run_id", - "messages": [ - {"role": "user", "content": "What's the weather like in Shanghai?"} - ] -} - - -Your agent processes and streams events - -CometChat delivers to end user in real-time - -Event Types - -Your CrewAI agent emits these events: - -Text Events: - -text_start: Beginning of assistant message - -text_delta: Chunks of text content - -text_end: End of assistant message - -Tool Events: - -tool_call_start: Tool invocation begins - -tool_call_args: Tool arguments - -tool_call_end: Tool execution complete - -tool_result: Tool execution result - -Control Events: - -done: Stream complete - -error: Error occurred - -Example Complete Flow - -User: "What's the weather in Tokyo?" - -Events Emitted: - -1. text_start (message begins) -2. tool_call_start (calling get_weather) -3. tool_call_args (location: Tokyo) -4. tool_call_end (tool finished) -5. tool_result (weather data) -6. text_delta (agent's response text) -7. text_end (message complete) -8. done (stream finished) - - -User Sees: - -🌤️ Calling get_weather... -📍 Location: Tokyo - -The current weather in Tokyo is 22°C -with clear skies. The humidity is 65% -and wind speed is 8 mph. - - -Customizing Your Agent - -Adding New Tools - -Create a new tool in src/crew_demo/tools/: - -# src/crew_demo/tools/calculator_tool.py -from crewai.tools import tool - -@tool("calculate") -def calculate(expression: str) -> str: - """Safely evaluate a mathematical expression - - Args: - expression: Mathematical expression like "2 + 2" or "10 * 5" - - Returns: - Result of the calculation - """ - try: - # Use a safe eval or math library - result = eval(expression, {"__builtins__": {}}, {}) - return str(result) - except Exception as e: - raise Exception(f"Calculation error: {str(e)}") - - -Register it in crew.py: - -from crew_demo.tools.calculator_tool import calculate - -@agent -def assistant(self) -> Agent: - return Agent( - config=self.agents_config['assistant'], - tools=[self.search_tool, get_weather, calculate], # Add new tool - verbose=False, - memory=False - ) - - -Using Different LLM Providers - -CrewAI supports multiple LLM providers. See CrewAI's LLM documentation. Update your .env: - -Anthropic Claude: - -# .env -ANTHROPIC_API_KEY=your_anthropic_key -LLM_MODEL=claude-3-5-sonnet-20241022 - - -Then in your agent configuration, CrewAI will automatically detect and use Anthropic. 
- -See CrewAI docs for more providers: https://docs.crewai.com/concepts/llms - -Multi-Agent Workflows - -Create multiple agents for complex workflows: - -# agents.yaml -researcher: - role: Research Specialist - goal: Find accurate information on the web - backstory: Expert at finding and verifying information - -writer: - role: Content Writer - goal: Create engaging, informative responses - backstory: Skilled at turning research into clear answers - - -# crew.py -@agent -def researcher(self) -> Agent: - return Agent( - config=self.agents_config['researcher'], - tools=[self.search_tool], - verbose=False - ) - -@agent -def writer(self) -> Agent: - return Agent( - config=self.agents_config['writer'], - tools=[], - verbose=False - ) - -@crew -def crew(self) -> Crew: - return Crew( - agents=[self.researcher(), self.writer()], - tasks=self.tasks, - process=Process.sequential, # Or Process.hierarchical - verbose=False, - stream=True - ) - - -Requirements for CometChat Compatibility - -Your CrewAI agent must: - -Expose an HTTP endpoint (e.g., /kickoff) that accepts POST requests - -Accept the request format: - -{ - "thread_id": str, - "run_id": str, - "messages": List[Message] -} - - -Stream responses using newline-delimited JSON (NDJSON) with these event types: - -text_start - -text_delta - -text_end - -tool_call_start - -tool_call_args - -tool_call_end - -tool_result - -done - -error - -Include proper media type: - -"Content-Type": "application/x-ndjson" - - -Beyond these requirements, you have complete flexibility in: - -Your agent's implementation - -Tool definitions and execution - -LLM provider choice - -Multi-agent orchestration - -Error handling strategies - -Troubleshooting - -Agent Not Responding - -Check: - -✅ Server is running: curl http://localhost:8000/ - -✅ Environment variables are set correctly - -✅ API keys are valid and have credits - -✅ Review server logs for errors - -Debug: - -# In crew.py, enable verbose mode -verbose=True # Shows agent reasoning - -# In main.py, add logging -import logging -logging.basicConfig(level=logging.DEBUG) - - -Tools Not Executing - -Check: - -✅ Tool is imported and registered in crew.py - -✅ Tool decorator is used: @tool("tool_name") - -✅ Tool docstring is clear and descriptive - -✅ Tool raises exceptions for errors (not return error JSON) - -Debug: - -# Test tool directly -from crew_demo.tools.weather_tool import get_weather -result = get_weather("London") -print(result) - - -Streaming Issues - -Check: - -✅ stream=True in Crew configuration - -✅ Events are yielded with \n at the end - -✅ Using StreamingResponse with media_type="application/x-ndjson" - -✅ CORS is configured if testing from browser - -Debug: - -# Test streaming with curl -N flag -curl -N -X POST http://localhost:8000/kickoff \ - -H "Content-Type: application/json" \ - -d '{"messages": [{"role": "user", "content": "test"}]}' - - -Duplicate Responses or Thought Process Visible - -Solution: This happens when the agent outputs multiple "Final Answer:" sections. The fix is to use streaming.result (already implemented in the provided main.py). - -Verify: - -✅ Using the main.py provided in this guide - -✅ Agent backstory includes instruction to hide thinking - -✅ streaming.result is used instead of parsing chunks - -Memory Issues - -Note: This guide uses stateless agents (memory=False) and passes conversation history manually. 
This ensures: - -✅ Consistent behavior across requests - -✅ No state pollution between users - -✅ Full control over conversation context - -If you need persistent memory: - -# crew.py -@agent -def assistant(self) -> Agent: - return Agent( - config=self.agents_config['assistant'], - tools=[self.search_tool, get_weather], - verbose=False, - memory=True # Enable memory - ) - - -Deployment - -Local Development - -# Run with auto-reload -uvicorn crew_demo.main:app --reload --host 0.0.0.0 --port 8000 - - -Production Deployment - -This CrewAI agent can be deployed to any platform that supports Python web applications. Ensure: - -Environment variables are configured in your hosting platform - -Endpoint is publicly accessible (HTTPS recommended) - -Server can handle concurrent requests (consider worker processes) - -Logging is configured for monitoring - -Generic Production Command: - -uvicorn crew_demo.main:app --host 0.0.0.0 --port 8000 --workers 4 - - -Important: Once deployed, update the CometChat agent's Deployment URL with your live endpoint (e.g., https://your-deployment-endpoint.com/kickoff). - -Security Recommendations - -Use HTTPS for production endpoints - -Validate incoming requests (optional: verify CometChat signature) - -Set rate limits to prevent abuse - -Monitor API usage and costs - -Keep API keys secure (never commit to git) - -# Example: Add authentication header validation -@app.post("/kickoff") -async def kickoff(request: KickoffRequest, authorization: str = Header(None)): - # Validate authorization token - if authorization != f"Bearer {os.getenv('EXPECTED_TOKEN')}": - raise HTTPException(status_code=401, detail="Unauthorized") - - # Process request... - - -Advanced Features - -Conversation Memory - -Add conversation memory to maintain context across sessions: - -# crew.py -from crewai.memory import ShortTermMemory, LongTermMemory - -@crew -def crew(self) -> Crew: - return Crew( - agents=self.agents, - tasks=self.tasks, - process=Process.sequential, - verbose=False, - stream=True, - memory=True, # Enable crew memory - short_term_memory=ShortTermMemory(), - long_term_memory=LongTermMemory() - ) - - -Custom Knowledge Base - -Place documents in the knowledge/ folder: - -knowledge/ -├── user_preference.txt # User-specific info -├── company_policies.md # Company guidelines -└── product_catalog.json # Product information - - -The agent can access these documents during execution. - -Structured Output - -Force structured responses: - -# tasks.yaml -conversation_task: - description: > - Respond to: {user_message} - expected_output: > - A JSON object with: - { - "response": "your answer here", - "confidence": 0.0-1.0, - "sources": ["source1", "source2"] - } - agent: assistant - output_json: true # Force JSON output - - -Best Practices - -1. Tool Design - -✅ Raise exceptions for errors (don't return error JSON) - -✅ Return structured data when possible - -✅ Include clear docstrings for LLM understanding - -✅ Keep tools focused (single responsibility) - -2. Agent Instructions - -✅ Be explicit about tool usage - -✅ Define success/failure criteria clearly - -✅ Instruct to hide internal reasoning - -✅ Provide examples in backstory - -3. Error Handling - -✅ Catch and handle API errors gracefully - -✅ Provide informative error messages - -✅ Log errors for debugging - -✅ Don't expose internal details to users - -4. Performance - -✅ Use streaming for better UX - -✅ Keep agents stateless when possible - -✅ Cache tool results when appropriate - -✅ Monitor API costs and usage - -5. 
Testing - -✅ Test each tool independently - -✅ Test with various conversation contexts - -✅ Test error scenarios - -✅ Monitor streaming performance - -Example Use Cases - -1. Customer Support Agent - -# agents.yaml -support_agent: - role: Customer Support Specialist - goal: Help customers with inquiries and issues - backstory: > - Expert at understanding customer needs and providing solutions. - Can search knowledge base, check order status, and escalate when needed. - - -Tools: knowledge search, order lookup, ticket creation - -2. Research Assistant - -# agents.yaml -researcher: - role: Research Assistant - goal: Find and synthesize information from multiple sources - backstory: > - Skilled at web research, fact-checking, and presenting findings clearly. - - -Tools: web search, PDF reader, citation formatter - -3. Data Analyst Agent - -# agents.yaml -analyst: - role: Data Analyst - goal: Analyze data and provide insights - backstory: > - Expert at data analysis, visualization, and explaining findings. - - -Tools: database query, chart generator, statistical analysis - -Conclusion - -By combining the CometChat Agentic Interface with CrewAI, developers can connect sophisticated AI agents with end users instantly and securely. CrewAI provides powerful agent orchestration, tool integration, and multi-agent collaboration, while CometChat handles the real-time communication layer, authentication, and message delivery. - -This architecture provides the flexibility of building agents with any LLM backend and custom tool integrations while maintaining the scalability and security of CometChat's platform. - -Additional Resources - -CrewAI Documentation: https://docs.crewai.com/ - -CometChat Dashboard: https://app.cometchat.com/ - -CometChat Docs: https://docs.cometchat.com/ - -OpenAI API: https://platform.openai.com/docs - -FastAPI Documentation: https://fastapi.tiangolo.com/ - -Support - -For issues or questions: - -CrewAI: https://github.com/crewAIInc/crewAI/issues - -CometChat: https://www.cometchat.com - -Version: 0.0.1 -CrewAI Version: 1.7.2 -Python Version: 3.13.2+ \ No newline at end of file From 1e743e44c87004570853a13079250ff67db07232 Mon Sep 17 00:00:00 2001 From: Swapnil Godambe Date: Tue, 13 Jan 2026 20:19:32 +0530 Subject: [PATCH 09/15] Update crew-ai.mdx --- ai-agents/crew-ai.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ai-agents/crew-ai.mdx b/ai-agents/crew-ai.mdx index 82ea274f..ebb8a62f 100644 --- a/ai-agents/crew-ai.mdx +++ b/ai-agents/crew-ai.mdx @@ -8,7 +8,7 @@ import { Steps, Step, CardGroup, Card } from 'mintlify'; ## What you’ll build -- A **CrewAI** agent exposed via a public endpoint (e.g., FastAPI `/kickoff`) that streams NDJSON. +- A **CrewAI** agent exposed via a public endpoint (e.g., FastAPI `/stream`) that streams NDJSON. - The same agent **connected to CometChat** (Agent ID + Deployment URL). - A **customized chat experience** using **UI Kit Builder**. - An export to **React UI Kit code** _or_ **Chat Widget** for integration. @@ -39,7 +39,7 @@ import { Steps, Step, CardGroup, Card } from 'mintlify'; ## Step 2 - Connect your CrewAI Agent -Navigate to **AI Agent → Get Started** and then **AI Agents → Add Agent**. +Navigate to **AI Agent → BYO Agents** and then **Get Started / Integrate → Choose CrewAI**. 
From 53ece88e3db0f9bcbd6305d8a4e716ee30b20528 Mon Sep 17 00:00:00 2001 From: Swapnil Godambe Date: Tue, 13 Jan 2026 20:30:42 +0530 Subject: [PATCH 10/15] Update crew-ai.mdx --- ai-agents/crew-ai.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ai-agents/crew-ai.mdx b/ai-agents/crew-ai.mdx index ebb8a62f..d0ba2192 100644 --- a/ai-agents/crew-ai.mdx +++ b/ai-agents/crew-ai.mdx @@ -57,7 +57,7 @@ Navigate to **AI Agent → BYO Agents** and then **Get Started / Integrate → C Paste:
  • Agent ID — a unique handle that matches how you route traffic (e.g., support).
  • -
  • Deployment URL — the public HTTPS endpoint exposed by your CrewAI service (e.g., /kickoff).
  • +
  • Deployment URL — the public HTTPS endpoint exposed by your CrewAI service (e.g., /stream).
  • (Optional) Headers — flat JSON auth headers your FastAPI deployment expects.
@@ -143,10 +143,10 @@ Choose how you’ll ship the experience (Widget or React UI Kit export). ## Step 6 - Deploy & Secure (Reference) -If you need a starter CrewAI endpoint, you can reuse the `/kickoff` pattern from your CrewAI project: +If you need a starter CrewAI endpoint, you can reuse the `/stream` pattern from your CrewAI project: ```python -@app.post("/kickoff") +@app.post("/stream") async def kickoff(request: KickoffRequest): inputs = { "user_message": request.messages[-1].content, From 1f1328c8ede5cd02d19e5853ba70cc27262d1d57 Mon Sep 17 00:00:00 2001 From: Swapnil Godambe Date: Tue, 13 Jan 2026 20:42:58 +0530 Subject: [PATCH 11/15] Update crew-ai.mdx --- ai-agents/crew-ai.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ai-agents/crew-ai.mdx b/ai-agents/crew-ai.mdx index d0ba2192..20f4775d 100644 --- a/ai-agents/crew-ai.mdx +++ b/ai-agents/crew-ai.mdx @@ -39,7 +39,7 @@ import { Steps, Step, CardGroup, Card } from 'mintlify'; ## Step 2 - Connect your CrewAI Agent -Navigate to **AI Agent → BYO Agents** and then **Get Started / Integrate → Choose CrewAI**. +Navigate to **AI Agent → BYO Agents** and then **Get Started / Integrate**. From e7a6c58b0d723d267a59093f98dc09272c3037cf Mon Sep 17 00:00:00 2001 From: Swapnil Godambe Date: Tue, 13 Jan 2026 22:05:28 +0530 Subject: [PATCH 12/15] Update crew-ai.mdx --- ai-agents/crew-ai.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ai-agents/crew-ai.mdx b/ai-agents/crew-ai.mdx index 20f4775d..75331ff3 100644 --- a/ai-agents/crew-ai.mdx +++ b/ai-agents/crew-ai.mdx @@ -8,7 +8,7 @@ import { Steps, Step, CardGroup, Card } from 'mintlify'; ## What you’ll build -- A **CrewAI** agent exposed via a public endpoint (e.g., FastAPI `/stream`) that streams NDJSON. +- A **CrewAI** agent exposed via a public endpoint (e.g., FastAPI `/kickoff`) that streams NDJSON. - The same agent **connected to CometChat** (Agent ID + Deployment URL). - A **customized chat experience** using **UI Kit Builder**. - An export to **React UI Kit code** _or_ **Chat Widget** for integration. @@ -57,7 +57,7 @@ Navigate to **AI Agent → BYO Agents** and then **Get Started / Integrate**. Paste:
  • Agent ID — a unique handle that matches how you route traffic (e.g., support).
  • 
  • Deployment URL — the public HTTPS endpoint exposed by your CrewAI service (e.g., /stream).
  • +
-  • Deployment URL — the public HTTPS endpoint exposed by your CrewAI service (e.g., /stream).
+  • Deployment URL — the public HTTPS endpoint exposed by your CrewAI service (e.g., /kickoff).
  • 
@@ -143,10 +143,10 @@ Choose how you’ll ship the experience (Widget or React UI Kit export). ## Step 6 - Deploy & Secure (Reference) -If you need a starter CrewAI endpoint, you can reuse the `/stream` pattern from your CrewAI project: +If you need a starter CrewAI endpoint, you can reuse the `/kickoff` pattern from your CrewAI project: ```python -@app.post("/stream") +@app.post("/kickoff") async def kickoff(request: KickoffRequest): inputs = { "user_message": request.messages[-1].content, From bd975385003ec07c764ee596a7ad936bc7e50da2 Mon Sep 17 00:00:00 2001 From: Swapnil Godambe Date: Tue, 13 Jan 2026 23:23:45 +0530 Subject: [PATCH 13/15] updates github repo --- ai-agents/crew-ai-knowledge-agent.mdx | 2 +- ai-agents/crew-ai-product-hunt-agent.mdx | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ai-agents/crew-ai-knowledge-agent.mdx b/ai-agents/crew-ai-knowledge-agent.mdx index 47a51f6f..94d61e64 100644 --- a/ai-agents/crew-ai-knowledge-agent.mdx +++ b/ai-agents/crew-ai-knowledge-agent.mdx @@ -6,7 +6,7 @@ description: "Create a CrewAI knowledge agent that answers from your docs, strea import { Steps, Step } from 'mintlify'; -Based on the refreshed [`ai-agent-crew-ai-examples`](https://github.com/) codebase, here’s how to run the `knowledge_agent` FastAPI service, ingest docs, and stream answers into CometChat. +Based on the refreshed [`ai-agent-crew-ai-examples`](https://github.com/cometchat/ai-agent-crew-ai-examples) codebase, here’s how to run the `knowledge_agent` FastAPI service, ingest docs, and stream answers into CometChat. *** diff --git a/ai-agents/crew-ai-product-hunt-agent.mdx b/ai-agents/crew-ai-product-hunt-agent.mdx index c3a5c5e8..041e767e 100644 --- a/ai-agents/crew-ai-product-hunt-agent.mdx +++ b/ai-agents/crew-ai-product-hunt-agent.mdx @@ -6,7 +6,7 @@ description: "Create a CrewAI agent that fetches Product Hunt posts, answers lau import { Steps, Step } from 'mintlify'; -Refreshed for the latest [`ai-agent-crew-ai-examples`](https://github.com/) codebase: run the `product_hunt_agent` FastAPI service, wire it to CometChat, and stream SSE updates (with optional confetti actions). +Refreshed for the latest [`ai-agent-crew-ai-examples`](https://github.com/cometchat/ai-agent-crew-ai-examples) codebase: run the `product_hunt_agent` FastAPI service, wire it to CometChat, and stream SSE updates (with optional confetti actions). --- From 59bc5c22056bd549e1d4563f67f58b380be6ae5a Mon Sep 17 00:00:00 2001 From: Swapnil Godambe Date: Tue, 13 Jan 2026 23:30:33 +0530 Subject: [PATCH 14/15] updates the doc --- ai-agents/crew-ai-knowledge-agent.mdx | 18 +++++++++++------- ai-agents/crew-ai-product-hunt-agent.mdx | 23 ++++++++++++++--------- ai-agents/crew-ai.mdx | 8 +++++--- 3 files changed, 30 insertions(+), 19 deletions(-) diff --git a/ai-agents/crew-ai-knowledge-agent.mdx b/ai-agents/crew-ai-knowledge-agent.mdx index 94d61e64..6d2fd931 100644 --- a/ai-agents/crew-ai-knowledge-agent.mdx +++ b/ai-agents/crew-ai-knowledge-agent.mdx @@ -14,7 +14,7 @@ Based on the refreshed [`ai-agent-crew-ai-examples`](https://github.com/cometcha - A **CrewAI** agent that grounds replies in your ingested docs (per namespace). - A **FastAPI** service with ingest/search/generate endpoints plus a `/stream` SSE. -- **CometChat AI Agent** wiring that consumes newline-delimited JSON chunks (`text_delta`, `text_done`, `done`). +- **CometChat AI Agent** wiring that consumes newline-delimited JSON chunks (`text_start`, `text_delta`, `text_end`, `done`). 
*** @@ -51,8 +51,9 @@ uvicorn knowledge_agent.main:app --host 0.0.0.0 --port 8000 --reload Date: Tue, 13 Jan 2026 23:34:07 +0530 Subject: [PATCH 15/15] updates navigation --- ai-agents/crew-ai-knowledge-agent.mdx | 2 +- ai-agents/crew-ai-product-hunt-agent.mdx | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ai-agents/crew-ai-knowledge-agent.mdx b/ai-agents/crew-ai-knowledge-agent.mdx index 6d2fd931..06bbd47b 100644 --- a/ai-agents/crew-ai-knowledge-agent.mdx +++ b/ai-agents/crew-ai-knowledge-agent.mdx @@ -136,7 +136,7 @@ crew = Crew(agents=[agent], tasks=[task], process=Process.sequential) ## Wire it to CometChat -- Dashboard → AI Agents → **Provider: CrewAI** → **Agent ID** (e.g., `knowledge`) → **Deployment URL** = your public `/stream`. +- Dashboard → **AI Agent → BYO Agents** and then **Get Started / Integrate → Choose CrewAI**. → **Agent ID** (e.g., `knowledge`) → **Deployment URL** = your public `/stream`. - The SSE stream is newline-delimited JSON; CometChat AG-UI adapters parse `text_start`/`text_delta`/`text_end` and stop on `done`. Message IDs, thread IDs, and run IDs are included for threading. - Use namespaces to keep customer/workspace data separate; pass `namespace` in the payload or inside `tool_params.namespace` (either works; defaults to `default` if omitted). - Keep secrets server-side; add auth headers on the FastAPI route if needed. diff --git a/ai-agents/crew-ai-product-hunt-agent.mdx b/ai-agents/crew-ai-product-hunt-agent.mdx index 1a029c14..7b57387b 100644 --- a/ai-agents/crew-ai-product-hunt-agent.mdx +++ b/ai-agents/crew-ai-product-hunt-agent.mdx @@ -98,6 +98,6 @@ All tools run server-side; if `PRODUCTHUNT_API_TOKEN` is missing, top/timeframe ## Wire it to CometChat -- Dashboard → AI Agents → **Provider: CrewAI** → **Agent ID** (e.g., `product_hunt`) → **Deployment URL** = your public `/stream`. +- Dashboard → **AI Agent → BYO Agents** and then **Get Started / Integrate → Choose CrewAI**. → **Agent ID** (e.g., `product_hunt`) → **Deployment URL** = your public `/stream`. - Listen for `text_start`/`text_delta`/`text_end` to render streaming text; stop on `done`. - When `triggerConfetti` returns, map the payload to your UI handler (Widget/React UI Kit). Keep API tokens server-side.
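
To make the client side of this wiring concrete, here is a small sketch that consumes the stream and honors the `text_delta`/`done` contract described above. The URL, the `delta` field, and the example question are assumptions for illustration; `messages` and `tool_params.namespace` mirror the payload fields mentioned in these patches.

```python
import json

import httpx  # pip install httpx

payload = {
    "messages": [{"role": "user", "content": "What are today's top Product Hunt launches?"}],
    "tool_params": {"namespace": "docs"},  # optional; the docs say it defaults to "default"
}

# Stream the response line by line instead of waiting for the full body.
with httpx.stream("POST", "http://localhost:8000/stream", json=payload, timeout=30.0) as resp:
    resp.raise_for_status()
    for line in resp.iter_lines():
        if not line.strip():
            continue  # skip keep-alive blank lines
        event = json.loads(line)
        etype = event.get("type")
        if etype == "text_delta":
            print(event.get("delta", ""), end="", flush=True)  # render text as it arrives
        elif etype == "done":
            break  # run finished; stop reading
```

Swapping `print` for a UI callback is essentially what the Widget/React UI Kit handlers do; tokens and auth headers stay server-side, as the docs recommend.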