diff --git a/src/filesystem/index.ts b/src/filesystem/index.ts index 7888196285..e8ddc233f8 100644 --- a/src/filesystem/index.ts +++ b/src/filesystem/index.ts @@ -1,11 +1,8 @@ #!/usr/bin/env node -import { Server } from "@modelcontextprotocol/sdk/server/index.js"; +import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js"; import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js"; import { - CallToolRequestSchema, - ListToolsRequestSchema, - ToolSchema, RootsListChangedNotificationSchema, type Root, } from "@modelcontextprotocol/sdk/types.js"; @@ -13,7 +10,6 @@ import fs from "fs/promises"; import { createReadStream } from "fs"; import path from "path"; import { z } from "zod"; -import { zodToJsonSchema } from "zod-to-json-schema"; import { minimatch } from "minimatch"; import { normalizePath, expandHome } from './path-utils.js'; import { getValidRootDirectories } from './roots-utils.js'; @@ -143,20 +139,12 @@ const GetFileInfoArgsSchema = z.object({ path: z.string(), }); -const ToolInputSchema = ToolSchema.shape.inputSchema; -type ToolInput = z.infer; - // Server setup -const server = new Server( +const server = new McpServer( { name: "secure-filesystem-server", version: "0.2.0", - }, - { - capabilities: { - tools: {}, - }, - }, + } ); // Reads a file as a stream of buffers, concatenates them, and then encodes @@ -177,468 +165,567 @@ async function readFileAsBase64Stream(filePath: string): Promise { }); } -// Tool handlers -server.setRequestHandler(ListToolsRequestSchema, async () => { +// Tool registrations + +// read_file (deprecated) and read_text_file +const readTextFileHandler = async (args: z.infer) => { + const validPath = await validatePath(args.path); + + if (args.head && args.tail) { + throw new Error("Cannot specify both head and tail parameters simultaneously"); + } + + if (args.tail) { + const tailContent = await tailFile(validPath, args.tail); + return { + content: [{ type: "text" as const, text: tailContent }], + }; + } + + if (args.head) { + const headContent = await headFile(validPath, args.head); + return { + content: [{ type: "text" as const, text: headContent }], + }; + } + const content = await readFileContent(validPath); return { - tools: [ - { - name: "read_file", - description: "Read the complete contents of a file as text. DEPRECATED: Use read_text_file instead.", - inputSchema: zodToJsonSchema(ReadTextFileArgsSchema) as ToolInput, - }, - { - name: "read_text_file", - description: - "Read the complete contents of a file from the file system as text. " + - "Handles various text encodings and provides detailed error messages " + - "if the file cannot be read. Use this tool when you need to examine " + - "the contents of a single file. Use the 'head' parameter to read only " + - "the first N lines of a file, or the 'tail' parameter to read only " + - "the last N lines of a file. Operates on the file as text regardless of extension. " + - "Only works within allowed directories.", - inputSchema: zodToJsonSchema(ReadTextFileArgsSchema) as ToolInput, - }, - { - name: "read_media_file", - description: - "Read an image or audio file. Returns the base64 encoded data and MIME type. " + - "Only works within allowed directories.", - inputSchema: zodToJsonSchema(ReadMediaFileArgsSchema) as ToolInput, - }, - { - name: "read_multiple_files", - description: - "Read the contents of multiple files simultaneously. This is more " + - "efficient than reading files one by one when you need to analyze " + - "or compare multiple files. 
Each file's content is returned with its " + - "path as a reference. Failed reads for individual files won't stop " + - "the entire operation. Only works within allowed directories.", - inputSchema: zodToJsonSchema(ReadMultipleFilesArgsSchema) as ToolInput, - }, - { - name: "write_file", - description: - "Create a new file or completely overwrite an existing file with new content. " + - "Use with caution as it will overwrite existing files without warning. " + - "Handles text content with proper encoding. Only works within allowed directories.", - inputSchema: zodToJsonSchema(WriteFileArgsSchema) as ToolInput, - }, - { - name: "edit_file", - description: - "Make line-based edits to a text file. Each edit replaces exact line sequences " + - "with new content. Returns a git-style diff showing the changes made. " + - "Only works within allowed directories.", - inputSchema: zodToJsonSchema(EditFileArgsSchema) as ToolInput, - }, - { - name: "create_directory", - description: - "Create a new directory or ensure a directory exists. Can create multiple " + - "nested directories in one operation. If the directory already exists, " + - "this operation will succeed silently. Perfect for setting up directory " + - "structures for projects or ensuring required paths exist. Only works within allowed directories.", - inputSchema: zodToJsonSchema(CreateDirectoryArgsSchema) as ToolInput, - }, - { - name: "list_directory", - description: - "Get a detailed listing of all files and directories in a specified path. " + - "Results clearly distinguish between files and directories with [FILE] and [DIR] " + - "prefixes. This tool is essential for understanding directory structure and " + - "finding specific files within a directory. Only works within allowed directories.", - inputSchema: zodToJsonSchema(ListDirectoryArgsSchema) as ToolInput, - }, - { - name: "list_directory_with_sizes", - description: - "Get a detailed listing of all files and directories in a specified path, including sizes. " + - "Results clearly distinguish between files and directories with [FILE] and [DIR] " + - "prefixes. This tool is useful for understanding directory structure and " + - "finding specific files within a directory. Only works within allowed directories.", - inputSchema: zodToJsonSchema(ListDirectoryWithSizesArgsSchema) as ToolInput, - }, - { - name: "directory_tree", - description: - "Get a recursive tree view of files and directories as a JSON structure. " + - "Each entry includes 'name', 'type' (file/directory), and 'children' for directories. " + - "Files have no children array, while directories always have a children array (which may be empty). " + - "The output is formatted with 2-space indentation for readability. Only works within allowed directories.", - inputSchema: zodToJsonSchema(DirectoryTreeArgsSchema) as ToolInput, - }, - { - name: "move_file", - description: - "Move or rename files and directories. Can move files between directories " + - "and rename them in a single operation. If the destination exists, the " + - "operation will fail. Works across different directories and can be used " + - "for simple renaming within the same directory. Both source and destination must be within allowed directories.", - inputSchema: zodToJsonSchema(MoveFileArgsSchema) as ToolInput, - }, - { - name: "search_files", - description: - "Recursively search for files and directories matching a pattern. " + - "The patterns should be glob-style patterns that match paths relative to the working directory. 
" + - "Use pattern like '*.ext' to match files in current directory, and '**/*.ext' to match files in all subdirectories. " + - "Returns full paths to all matching items. Great for finding files when you don't know their exact location. " + - "Only searches within allowed directories.", - inputSchema: zodToJsonSchema(SearchFilesArgsSchema) as ToolInput, - }, - { - name: "get_file_info", - description: - "Retrieve detailed metadata about a file or directory. Returns comprehensive " + - "information including size, creation time, last modified time, permissions, " + - "and type. This tool is perfect for understanding file characteristics " + - "without reading the actual content. Only works within allowed directories.", - inputSchema: zodToJsonSchema(GetFileInfoArgsSchema) as ToolInput, - }, - { - name: "list_allowed_directories", - description: - "Returns the list of directories that this server is allowed to access. " + - "Subdirectories within these allowed directories are also accessible. " + - "Use this to understand which directories and their nested paths are available " + - "before trying to access files.", - inputSchema: { - type: "object", - properties: {}, - required: [], - }, - }, - ], + content: [{ type: "text" as const, text: content }], }; -}); +}; + +server.registerTool( + "read_file", + { + title: "Read File (Deprecated)", + description: "Read the complete contents of a file as text. DEPRECATED: Use read_text_file instead.", + inputSchema: { + path: z.string(), + tail: z.number().optional().describe("If provided, returns only the last N lines of the file"), + head: z.number().optional().describe("If provided, returns only the first N lines of the file") + }, + outputSchema: { + content: z.array(z.object({ + type: z.literal("text"), + text: z.string() + })) + } + }, + readTextFileHandler +); +server.registerTool( + "read_text_file", + { + title: "Read Text File", + description: + "Read the complete contents of a file from the file system as text. " + + "Handles various text encodings and provides detailed error messages " + + "if the file cannot be read. Use this tool when you need to examine " + + "the contents of a single file. Use the 'head' parameter to read only " + + "the first N lines of a file, or the 'tail' parameter to read only " + + "the last N lines of a file. Operates on the file as text regardless of extension. " + + "Only works within allowed directories.", + inputSchema: { + path: z.string(), + tail: z.number().optional().describe("If provided, returns only the last N lines of the file"), + head: z.number().optional().describe("If provided, returns only the first N lines of the file") + }, + outputSchema: { + content: z.array(z.object({ + type: z.literal("text"), + text: z.string() + })) + } + }, + readTextFileHandler +); -server.setRequestHandler(CallToolRequestSchema, async (request) => { - try { - const { name, arguments: args } = request.params; - - switch (name) { - case "read_file": - case "read_text_file": { - const parsed = ReadTextFileArgsSchema.safeParse(args); - if (!parsed.success) { - throw new Error(`Invalid arguments for read_text_file: ${parsed.error}`); - } - const validPath = await validatePath(parsed.data.path); +server.registerTool( + "read_media_file", + { + title: "Read Media File", + description: + "Read an image or audio file. Returns the base64 encoded data and MIME type. 
" + + "Only works within allowed directories.", + inputSchema: { + path: z.string() + }, + outputSchema: { + content: z.array(z.object({ + type: z.enum(["image", "audio"]), + data: z.string(), + mimeType: z.string() + })) + } + }, + async (args: z.infer) => { + const validPath = await validatePath(args.path); + const extension = path.extname(validPath).toLowerCase(); + const mimeTypes: Record = { + ".png": "image/png", + ".jpg": "image/jpeg", + ".jpeg": "image/jpeg", + ".gif": "image/gif", + ".webp": "image/webp", + ".bmp": "image/bmp", + ".svg": "image/svg+xml", + ".mp3": "audio/mpeg", + ".wav": "audio/wav", + ".ogg": "audio/ogg", + ".flac": "audio/flac", + }; + const mimeType = mimeTypes[extension] || "application/octet-stream"; + const data = await readFileAsBase64Stream(validPath); + + if (mimeType.startsWith("audio/")) { + return { + content: [{ type: "audio" as const, data, mimeType }], + }; + } else { + // For all other media types including images and unknown types, return as image + // (MCP ImageContent can handle any base64-encoded binary data with appropriate mimeType) + return { + content: [{ type: "image" as const, data, mimeType }], + }; + } + } +); - if (parsed.data.head && parsed.data.tail) { - throw new Error("Cannot specify both head and tail parameters simultaneously"); +server.registerTool( + "read_multiple_files", + { + title: "Read Multiple Files", + description: + "Read the contents of multiple files simultaneously. This is more " + + "efficient than reading files one by one when you need to analyze " + + "or compare multiple files. Each file's content is returned with its " + + "path as a reference. Failed reads for individual files won't stop " + + "the entire operation. Only works within allowed directories.", + inputSchema: { + paths: z.array(z.string()) + .min(1) + .describe("Array of file paths to read. Each path must be a string pointing to a valid file within allowed directories.") + }, + outputSchema: { + content: z.array(z.object({ + type: z.literal("text"), + text: z.string() + })) + } + }, + async (args: z.infer) => { + const results = await Promise.all( + args.paths.map(async (filePath: string) => { + try { + const validPath = await validatePath(filePath); + const content = await readFileContent(validPath); + return `${filePath}:\n${content}\n`; + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + return `${filePath}: Error - ${errorMessage}`; } + }), + ); + return { + content: [{ type: "text" as const, text: results.join("\n---\n") }], + }; + } +); + +server.registerTool( + "write_file", + { + title: "Write File", + description: + "Create a new file or completely overwrite an existing file with new content. " + + "Use with caution as it will overwrite existing files without warning. " + + "Handles text content with proper encoding. Only works within allowed directories.", + inputSchema: { + path: z.string(), + content: z.string() + }, + outputSchema: { + content: z.array(z.object({ + type: z.literal("text"), + text: z.string() + })) + } + }, + async (args: z.infer) => { + const validPath = await validatePath(args.path); + await writeFileContent(validPath, args.content); + return { + content: [{ type: "text" as const, text: `Successfully wrote to ${args.path}` }], + }; + } +); + +server.registerTool( + "edit_file", + { + title: "Edit File", + description: + "Make line-based edits to a text file. Each edit replaces exact line sequences " + + "with new content. 
Returns a git-style diff showing the changes made. " + + "Only works within allowed directories.", + inputSchema: { + path: z.string(), + edits: z.array(z.object({ + oldText: z.string().describe("Text to search for - must match exactly"), + newText: z.string().describe("Text to replace with") + })), + dryRun: z.boolean().default(false).describe("Preview changes using git-style diff format") + }, + outputSchema: { + content: z.array(z.object({ + type: z.literal("text"), + text: z.string() + })) + } + }, + async (args: z.infer) => { + const validPath = await validatePath(args.path); + const result = await applyFileEdits(validPath, args.edits, args.dryRun); + return { + content: [{ type: "text" as const, text: result }], + }; + } +); - if (parsed.data.tail) { - // Use memory-efficient tail implementation for large files - const tailContent = await tailFile(validPath, parsed.data.tail); +server.registerTool( + "create_directory", + { + title: "Create Directory", + description: + "Create a new directory or ensure a directory exists. Can create multiple " + + "nested directories in one operation. If the directory already exists, " + + "this operation will succeed silently. Perfect for setting up directory " + + "structures for projects or ensuring required paths exist. Only works within allowed directories.", + inputSchema: { + path: z.string() + }, + outputSchema: { + content: z.array(z.object({ + type: z.literal("text"), + text: z.string() + })) + } + }, + async (args: z.infer) => { + const validPath = await validatePath(args.path); + await fs.mkdir(validPath, { recursive: true }); + return { + content: [{ type: "text" as const, text: `Successfully created directory ${args.path}` }], + }; + } +); + +server.registerTool( + "list_directory", + { + title: "List Directory", + description: + "Get a detailed listing of all files and directories in a specified path. " + + "Results clearly distinguish between files and directories with [FILE] and [DIR] " + + "prefixes. This tool is essential for understanding directory structure and " + + "finding specific files within a directory. Only works within allowed directories.", + inputSchema: { + path: z.string() + }, + outputSchema: { + content: z.array(z.object({ + type: z.literal("text"), + text: z.string() + })) + } + }, + async (args: z.infer) => { + const validPath = await validatePath(args.path); + const entries = await fs.readdir(validPath, { withFileTypes: true }); + const formatted = entries + .map((entry) => `${entry.isDirectory() ? "[DIR]" : "[FILE]"} ${entry.name}`) + .join("\n"); + return { + content: [{ type: "text" as const, text: formatted }], + }; + } +); + +server.registerTool( + "list_directory_with_sizes", + { + title: "List Directory with Sizes", + description: + "Get a detailed listing of all files and directories in a specified path, including sizes. " + + "Results clearly distinguish between files and directories with [FILE] and [DIR] " + + "prefixes. This tool is useful for understanding directory structure and " + + "finding specific files within a directory. 
Only works within allowed directories.", + inputSchema: { + path: z.string(), + sortBy: z.enum(["name", "size"]).optional().default("name").describe("Sort entries by name or size") + }, + outputSchema: { + content: z.array(z.object({ + type: z.literal("text"), + text: z.string() + })) + } + }, + async (args: z.infer) => { + const validPath = await validatePath(args.path); + const entries = await fs.readdir(validPath, { withFileTypes: true }); + + // Get detailed information for each entry + const detailedEntries = await Promise.all( + entries.map(async (entry) => { + const entryPath = path.join(validPath, entry.name); + try { + const stats = await fs.stat(entryPath); return { - content: [{ type: "text", text: tailContent }], + name: entry.name, + isDirectory: entry.isDirectory(), + size: stats.size, + mtime: stats.mtime }; - } - - if (parsed.data.head) { - // Use memory-efficient head implementation for large files - const headContent = await headFile(validPath, parsed.data.head); + } catch (error) { return { - content: [{ type: "text", text: headContent }], + name: entry.name, + isDirectory: entry.isDirectory(), + size: 0, + mtime: new Date(0) }; } - const content = await readFileContent(validPath); - return { - content: [{ type: "text", text: content }], - }; - } + }) + ); - case "read_media_file": { - const parsed = ReadMediaFileArgsSchema.safeParse(args); - if (!parsed.success) { - throw new Error(`Invalid arguments for read_media_file: ${parsed.error}`); - } - const validPath = await validatePath(parsed.data.path); - const extension = path.extname(validPath).toLowerCase(); - const mimeTypes: Record = { - ".png": "image/png", - ".jpg": "image/jpeg", - ".jpeg": "image/jpeg", - ".gif": "image/gif", - ".webp": "image/webp", - ".bmp": "image/bmp", - ".svg": "image/svg+xml", - ".mp3": "audio/mpeg", - ".wav": "audio/wav", - ".ogg": "audio/ogg", - ".flac": "audio/flac", - }; - const mimeType = mimeTypes[extension] || "application/octet-stream"; - const data = await readFileAsBase64Stream(validPath); - const type = mimeType.startsWith("image/") - ? "image" - : mimeType.startsWith("audio/") - ? "audio" - : "blob"; - return { - content: [{ type, data, mimeType }], - }; - } - - case "read_multiple_files": { - const parsed = ReadMultipleFilesArgsSchema.safeParse(args); - if (!parsed.success) { - throw new Error(`Invalid arguments for read_multiple_files: ${parsed.error}`); - } - const results = await Promise.all( - parsed.data.paths.map(async (filePath: string) => { - try { - const validPath = await validatePath(filePath); - const content = await readFileContent(validPath); - return `${filePath}:\n${content}\n`; - } catch (error) { - const errorMessage = error instanceof Error ? 
error.message : String(error); - return `${filePath}: Error - ${errorMessage}`; - } - }), - ); - return { - content: [{ type: "text", text: results.join("\n---\n") }], - }; + // Sort entries based on sortBy parameter + const sortedEntries = [...detailedEntries].sort((a, b) => { + if (args.sortBy === 'size') { + return b.size - a.size; // Descending by size } + // Default sort by name + return a.name.localeCompare(b.name); + }); - case "write_file": { - const parsed = WriteFileArgsSchema.safeParse(args); - if (!parsed.success) { - throw new Error(`Invalid arguments for write_file: ${parsed.error}`); - } - const validPath = await validatePath(parsed.data.path); - await writeFileContent(validPath, parsed.data.content); - return { - content: [{ type: "text", text: `Successfully wrote to ${parsed.data.path}` }], - }; - } + // Format the output + const formattedEntries = sortedEntries.map(entry => + `${entry.isDirectory ? "[DIR]" : "[FILE]"} ${entry.name.padEnd(30)} ${ + entry.isDirectory ? "" : formatSize(entry.size).padStart(10) + }` + ); - case "edit_file": { - const parsed = EditFileArgsSchema.safeParse(args); - if (!parsed.success) { - throw new Error(`Invalid arguments for edit_file: ${parsed.error}`); - } - const validPath = await validatePath(parsed.data.path); - const result = await applyFileEdits(validPath, parsed.data.edits, parsed.data.dryRun); - return { - content: [{ type: "text", text: result }], - }; - } + // Add summary + const totalFiles = detailedEntries.filter(e => !e.isDirectory).length; + const totalDirs = detailedEntries.filter(e => e.isDirectory).length; + const totalSize = detailedEntries.reduce((sum, entry) => sum + (entry.isDirectory ? 0 : entry.size), 0); - case "create_directory": { - const parsed = CreateDirectoryArgsSchema.safeParse(args); - if (!parsed.success) { - throw new Error(`Invalid arguments for create_directory: ${parsed.error}`); - } - const validPath = await validatePath(parsed.data.path); - await fs.mkdir(validPath, { recursive: true }); - return { - content: [{ type: "text", text: `Successfully created directory ${parsed.data.path}` }], - }; - } + const summary = [ + "", + `Total: ${totalFiles} files, ${totalDirs} directories`, + `Combined size: ${formatSize(totalSize)}` + ]; - case "list_directory": { - const parsed = ListDirectoryArgsSchema.safeParse(args); - if (!parsed.success) { - throw new Error(`Invalid arguments for list_directory: ${parsed.error}`); - } - const validPath = await validatePath(parsed.data.path); - const entries = await fs.readdir(validPath, { withFileTypes: true }); - const formatted = entries - .map((entry) => `${entry.isDirectory() ? 
"[DIR]" : "[FILE]"} ${entry.name}`) - .join("\n"); - return { - content: [{ type: "text", text: formatted }], - }; - } + return { + content: [{ + type: "text" as const, + text: [...formattedEntries, ...summary].join("\n") + }], + }; + } +); - case "list_directory_with_sizes": { - const parsed = ListDirectoryWithSizesArgsSchema.safeParse(args); - if (!parsed.success) { - throw new Error(`Invalid arguments for list_directory_with_sizes: ${parsed.error}`); - } - const validPath = await validatePath(parsed.data.path); - const entries = await fs.readdir(validPath, { withFileTypes: true }); - - // Get detailed information for each entry - const detailedEntries = await Promise.all( - entries.map(async (entry) => { - const entryPath = path.join(validPath, entry.name); - try { - const stats = await fs.stat(entryPath); - return { - name: entry.name, - isDirectory: entry.isDirectory(), - size: stats.size, - mtime: stats.mtime - }; - } catch (error) { - return { - name: entry.name, - isDirectory: entry.isDirectory(), - size: 0, - mtime: new Date(0) - }; - } - }) - ); - - // Sort entries based on sortBy parameter - const sortedEntries = [...detailedEntries].sort((a, b) => { - if (parsed.data.sortBy === 'size') { - return b.size - a.size; // Descending by size +server.registerTool( + "directory_tree", + { + title: "Directory Tree", + description: + "Get a recursive tree view of files and directories as a JSON structure. " + + "Each entry includes 'name', 'type' (file/directory), and 'children' for directories. " + + "Files have no children array, while directories always have a children array (which may be empty). " + + "The output is formatted with 2-space indentation for readability. Only works within allowed directories.", + inputSchema: { + path: z.string(), + excludePatterns: z.array(z.string()).optional().default([]) + }, + outputSchema: { + content: z.array(z.object({ + type: z.literal("text"), + text: z.string() + })) + } + }, + async (args: z.infer) => { + interface TreeEntry { + name: string; + type: 'file' | 'directory'; + children?: TreeEntry[]; + } + const rootPath = args.path; + + async function buildTree(currentPath: string, excludePatterns: string[] = []): Promise { + const validPath = await validatePath(currentPath); + const entries = await fs.readdir(validPath, { withFileTypes: true }); + const result: TreeEntry[] = []; + + for (const entry of entries) { + const relativePath = path.relative(rootPath, path.join(currentPath, entry.name)); + const shouldExclude = excludePatterns.some(pattern => { + if (pattern.includes('*')) { + return minimatch(relativePath, pattern, { dot: true }); } - // Default sort by name - return a.name.localeCompare(b.name); + // For files: match exact name or as part of path + // For directories: match as directory path + return minimatch(relativePath, pattern, { dot: true }) || + minimatch(relativePath, `**/${pattern}`, { dot: true }) || + minimatch(relativePath, `**/${pattern}/**`, { dot: true }); }); + if (shouldExclude) + continue; - // Format the output - const formattedEntries = sortedEntries.map(entry => - `${entry.isDirectory ? "[DIR]" : "[FILE]"} ${entry.name.padEnd(30)} ${ - entry.isDirectory ? "" : formatSize(entry.size).padStart(10) - }` - ); - - // Add summary - const totalFiles = detailedEntries.filter(e => !e.isDirectory).length; - const totalDirs = detailedEntries.filter(e => e.isDirectory).length; - const totalSize = detailedEntries.reduce((sum, entry) => sum + (entry.isDirectory ? 
0 : entry.size), 0); - - const summary = [ - "", - `Total: ${totalFiles} files, ${totalDirs} directories`, - `Combined size: ${formatSize(totalSize)}` - ]; - - return { - content: [{ - type: "text", - text: [...formattedEntries, ...summary].join("\n") - }], + const entryData: TreeEntry = { + name: entry.name, + type: entry.isDirectory() ? 'directory' : 'file' }; - } - - case "directory_tree": { - const parsed = DirectoryTreeArgsSchema.safeParse(args); - if (!parsed.success) { - throw new Error(`Invalid arguments for directory_tree: ${parsed.error}`); - } - interface TreeEntry { - name: string; - type: 'file' | 'directory'; - children?: TreeEntry[]; - } - const rootPath = parsed.data.path; - - async function buildTree(currentPath: string, excludePatterns: string[] = []): Promise { - const validPath = await validatePath(currentPath); - const entries = await fs.readdir(validPath, {withFileTypes: true}); - const result: TreeEntry[] = []; - - for (const entry of entries) { - const relativePath = path.relative(rootPath, path.join(currentPath, entry.name)); - const shouldExclude = excludePatterns.some(pattern => { - if (pattern.includes('*')) { - return minimatch(relativePath, pattern, {dot: true}); - } - // For files: match exact name or as part of path - // For directories: match as directory path - return minimatch(relativePath, pattern, {dot: true}) || - minimatch(relativePath, `**/${pattern}`, {dot: true}) || - minimatch(relativePath, `**/${pattern}/**`, {dot: true}); - }); - if (shouldExclude) - continue; - - const entryData: TreeEntry = { - name: entry.name, - type: entry.isDirectory() ? 'directory' : 'file' - }; - - if (entry.isDirectory()) { - const subPath = path.join(currentPath, entry.name); - entryData.children = await buildTree(subPath, excludePatterns); - } - - result.push(entryData); - } - - return result; + if (entry.isDirectory()) { + const subPath = path.join(currentPath, entry.name); + entryData.children = await buildTree(subPath, excludePatterns); } - const treeData = await buildTree(rootPath, parsed.data.excludePatterns); - return { - content: [{ - type: "text", - text: JSON.stringify(treeData, null, 2) - }], - }; + result.push(entryData); } - case "move_file": { - const parsed = MoveFileArgsSchema.safeParse(args); - if (!parsed.success) { - throw new Error(`Invalid arguments for move_file: ${parsed.error}`); - } - const validSourcePath = await validatePath(parsed.data.source); - const validDestPath = await validatePath(parsed.data.destination); - await fs.rename(validSourcePath, validDestPath); - return { - content: [{ type: "text", text: `Successfully moved ${parsed.data.source} to ${parsed.data.destination}` }], - }; - } + return result; + } - case "search_files": { - const parsed = SearchFilesArgsSchema.safeParse(args); - if (!parsed.success) { - throw new Error(`Invalid arguments for search_files: ${parsed.error}`); - } - const validPath = await validatePath(parsed.data.path); - const results = await searchFilesWithValidation(validPath, parsed.data.pattern, allowedDirectories, { excludePatterns: parsed.data.excludePatterns }); - return { - content: [{ type: "text", text: results.length > 0 ? 
results.join("\n") : "No matches found" }], - }; - } + const treeData = await buildTree(rootPath, args.excludePatterns); + return { + content: [{ + type: "text" as const, + text: JSON.stringify(treeData, null, 2) + }], + }; + } +); - case "get_file_info": { - const parsed = GetFileInfoArgsSchema.safeParse(args); - if (!parsed.success) { - throw new Error(`Invalid arguments for get_file_info: ${parsed.error}`); - } - const validPath = await validatePath(parsed.data.path); - const info = await getFileStats(validPath); - return { - content: [{ type: "text", text: Object.entries(info) - .map(([key, value]) => `${key}: ${value}`) - .join("\n") }], - }; - } +server.registerTool( + "move_file", + { + title: "Move File", + description: + "Move or rename files and directories. Can move files between directories " + + "and rename them in a single operation. If the destination exists, the " + + "operation will fail. Works across different directories and can be used " + + "for simple renaming within the same directory. Both source and destination must be within allowed directories.", + inputSchema: { + source: z.string(), + destination: z.string() + }, + outputSchema: { + content: z.array(z.object({ + type: z.literal("text"), + text: z.string() + })) + } + }, + async (args: z.infer) => { + const validSourcePath = await validatePath(args.source); + const validDestPath = await validatePath(args.destination); + await fs.rename(validSourcePath, validDestPath); + return { + content: [{ type: "text" as const, text: `Successfully moved ${args.source} to ${args.destination}` }], + }; + } +); - case "list_allowed_directories": { - return { - content: [{ - type: "text", - text: `Allowed directories:\n${allowedDirectories.join('\n')}` - }], - }; - } +server.registerTool( + "search_files", + { + title: "Search Files", + description: + "Recursively search for files and directories matching a pattern. " + + "The patterns should be glob-style patterns that match paths relative to the working directory. " + + "Use pattern like '*.ext' to match files in current directory, and '**/*.ext' to match files in all subdirectories. " + + "Returns full paths to all matching items. Great for finding files when you don't know their exact location. " + + "Only searches within allowed directories.", + inputSchema: { + path: z.string(), + pattern: z.string(), + excludePatterns: z.array(z.string()).optional().default([]) + }, + outputSchema: { + content: z.array(z.object({ + type: z.literal("text"), + text: z.string() + })) + } + }, + async (args: z.infer) => { + const validPath = await validatePath(args.path); + const results = await searchFilesWithValidation(validPath, args.pattern, allowedDirectories, { excludePatterns: args.excludePatterns }); + return { + content: [{ type: "text" as const, text: results.length > 0 ? results.join("\n") : "No matches found" }], + }; + } +); - default: - throw new Error(`Unknown tool: ${name}`); +server.registerTool( + "get_file_info", + { + title: "Get File Info", + description: + "Retrieve detailed metadata about a file or directory. Returns comprehensive " + + "information including size, creation time, last modified time, permissions, " + + "and type. This tool is perfect for understanding file characteristics " + + "without reading the actual content. 
Only works within allowed directories.", + inputSchema: { + path: z.string() + }, + outputSchema: { + content: z.array(z.object({ + type: z.literal("text"), + text: z.string() + })) } - } catch (error) { - const errorMessage = error instanceof Error ? error.message : String(error); + }, + async (args: z.infer) => { + const validPath = await validatePath(args.path); + const info = await getFileStats(validPath); return { - content: [{ type: "text", text: `Error: ${errorMessage}` }], - isError: true, + content: [{ type: "text" as const, text: Object.entries(info) + .map(([key, value]) => `${key}: ${value}`) + .join("\n") }], }; } -}); +); + +server.registerTool( + "list_allowed_directories", + { + title: "List Allowed Directories", + description: + "Returns the list of directories that this server is allowed to access. " + + "Subdirectories within these allowed directories are also accessible. " + + "Use this to understand which directories and their nested paths are available " + + "before trying to access files.", + inputSchema: {}, + outputSchema: { + content: z.array(z.object({ + type: z.literal("text"), + text: z.string() + })) + } + }, + async () => { + return { + content: [{ + type: "text" as const, + text: `Allowed directories:\n${allowedDirectories.join('\n')}` + }], + }; + } +); // Updates allowed directories based on MCP client roots async function updateAllowedDirectoriesFromRoots(requestedRoots: Root[]) { @@ -653,10 +740,10 @@ async function updateAllowedDirectoriesFromRoots(requestedRoots: Root[]) { } // Handles dynamic roots updates during runtime, when client sends "roots/list_changed" notification, server fetches the updated roots and replaces all allowed directories with the new roots. -server.setNotificationHandler(RootsListChangedNotificationSchema, async () => { +server.server.setNotificationHandler(RootsListChangedNotificationSchema, async () => { try { // Request the updated roots list from the client - const response = await server.listRoots(); + const response = await server.server.listRoots(); if (response && 'roots' in response) { await updateAllowedDirectoriesFromRoots(response.roots); } @@ -666,12 +753,12 @@ server.setNotificationHandler(RootsListChangedNotificationSchema, async () => { }); // Handles post-initialization setup, specifically checking for and fetching MCP roots. 
-server.oninitialized = async () => { - const clientCapabilities = server.getClientCapabilities(); +server.server.oninitialized = async () => { + const clientCapabilities = server.server.getClientCapabilities(); if (clientCapabilities?.roots) { try { - const response = await server.listRoots(); + const response = await server.server.listRoots(); if (response && 'roots' in response) { await updateAllowedDirectoriesFromRoots(response.roots); } else { diff --git a/src/filesystem/tsconfig.json b/src/filesystem/tsconfig.json index 31a299d96b..db219c5b45 100644 --- a/src/filesystem/tsconfig.json +++ b/src/filesystem/tsconfig.json @@ -12,6 +12,7 @@ "exclude": [ "**/__tests__/**", "**/*.test.ts", - "**/*.spec.ts" + "**/*.spec.ts", + "vitest.config.ts" ] } diff --git a/src/memory/index.ts b/src/memory/index.ts index 94585a4481..c7d781d2c4 100644 --- a/src/memory/index.ts +++ b/src/memory/index.ts @@ -1,11 +1,8 @@ #!/usr/bin/env node -import { Server } from "@modelcontextprotocol/sdk/server/index.js"; +import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js"; import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js"; -import { - CallToolRequestSchema, - ListToolsRequestSchema, -} from "@modelcontextprotocol/sdk/types.js"; +import { z } from "zod"; import { promises as fs } from 'fs'; import path from 'path'; import { fileURLToPath } from 'url'; @@ -226,243 +223,235 @@ export class KnowledgeGraphManager { let knowledgeGraphManager: KnowledgeGraphManager; +// Zod schemas for entities and relations +const EntitySchema = z.object({ + name: z.string().describe("The name of the entity"), + entityType: z.string().describe("The type of the entity"), + observations: z.array(z.string()).describe("An array of observation contents associated with the entity") +}); + +const RelationSchema = z.object({ + from: z.string().describe("The name of the entity where the relation starts"), + to: z.string().describe("The name of the entity where the relation ends"), + relationType: z.string().describe("The type of the relation") +}); // The server instance and tools exposed to Claude -const server = new Server({ +const server = new McpServer({ name: "memory-server", version: "0.6.3", -}, { - capabilities: { - tools: {}, - }, - },); - -server.setRequestHandler(ListToolsRequestSchema, async () => { - return { - tools: [ - { - name: "create_entities", - description: "Create multiple new entities in the knowledge graph", - inputSchema: { - type: "object", - properties: { - entities: { - type: "array", - items: { - type: "object", - properties: { - name: { type: "string", description: "The name of the entity" }, - entityType: { type: "string", description: "The type of the entity" }, - observations: { - type: "array", - items: { type: "string" }, - description: "An array of observation contents associated with the entity" - }, - }, - required: ["name", "entityType", "observations"], - additionalProperties: false, - }, - }, - }, - required: ["entities"], - additionalProperties: false, - }, - }, - { - name: "create_relations", - description: "Create multiple new relations between entities in the knowledge graph. 
Relations should be in active voice", - inputSchema: { - type: "object", - properties: { - relations: { - type: "array", - items: { - type: "object", - properties: { - from: { type: "string", description: "The name of the entity where the relation starts" }, - to: { type: "string", description: "The name of the entity where the relation ends" }, - relationType: { type: "string", description: "The type of the relation" }, - }, - required: ["from", "to", "relationType"], - additionalProperties: false, - }, - }, - }, - required: ["relations"], - additionalProperties: false, - }, - }, - { - name: "add_observations", - description: "Add new observations to existing entities in the knowledge graph", - inputSchema: { - type: "object", - properties: { - observations: { - type: "array", - items: { - type: "object", - properties: { - entityName: { type: "string", description: "The name of the entity to add the observations to" }, - contents: { - type: "array", - items: { type: "string" }, - description: "An array of observation contents to add" - }, - }, - required: ["entityName", "contents"], - additionalProperties: false, - }, - }, - }, - required: ["observations"], - additionalProperties: false, - }, - }, - { - name: "delete_entities", - description: "Delete multiple entities and their associated relations from the knowledge graph", - inputSchema: { - type: "object", - properties: { - entityNames: { - type: "array", - items: { type: "string" }, - description: "An array of entity names to delete" - }, - }, - required: ["entityNames"], - additionalProperties: false, - }, - }, - { - name: "delete_observations", - description: "Delete specific observations from entities in the knowledge graph", - inputSchema: { - type: "object", - properties: { - deletions: { - type: "array", - items: { - type: "object", - properties: { - entityName: { type: "string", description: "The name of the entity containing the observations" }, - observations: { - type: "array", - items: { type: "string" }, - description: "An array of observations to delete" - }, - }, - required: ["entityName", "observations"], - additionalProperties: false, - }, - }, - }, - required: ["deletions"], - additionalProperties: false, - }, - }, - { - name: "delete_relations", - description: "Delete multiple relations from the knowledge graph", - inputSchema: { - type: "object", - properties: { - relations: { - type: "array", - items: { - type: "object", - properties: { - from: { type: "string", description: "The name of the entity where the relation starts" }, - to: { type: "string", description: "The name of the entity where the relation ends" }, - relationType: { type: "string", description: "The type of the relation" }, - }, - required: ["from", "to", "relationType"], - additionalProperties: false, - }, - description: "An array of relations to delete" - }, - }, - required: ["relations"], - additionalProperties: false, - }, - }, - { - name: "read_graph", - description: "Read the entire knowledge graph", - inputSchema: { - type: "object", - properties: {}, - additionalProperties: false, - }, - }, - { - name: "search_nodes", - description: "Search for nodes in the knowledge graph based on a query", - inputSchema: { - type: "object", - properties: { - query: { type: "string", description: "The search query to match against entity names, types, and observation content" }, - }, - required: ["query"], - additionalProperties: false, - }, - }, - { - name: "open_nodes", - description: "Open specific nodes in the knowledge graph by their names", - 
inputSchema: { - type: "object", - properties: { - names: { - type: "array", - items: { type: "string" }, - description: "An array of entity names to retrieve", - }, - }, - required: ["names"], - additionalProperties: false, - }, - }, - ], - }; }); -server.setRequestHandler(CallToolRequestSchema, async (request) => { - const { name, arguments: args } = request.params; +// Register create_entities tool +server.registerTool( + "create_entities", + { + title: "Create Entities", + description: "Create multiple new entities in the knowledge graph", + inputSchema: { + entities: z.array(EntitySchema) + }, + outputSchema: { + entities: z.array(EntitySchema) + } + }, + async ({ entities }) => { + const result = await knowledgeGraphManager.createEntities(entities); + return { + content: [{ type: "text" as const, text: JSON.stringify(result, null, 2) }], + structuredContent: { entities: result } + }; + } +); - if (name === "read_graph") { - return { content: [{ type: "text", text: JSON.stringify(await knowledgeGraphManager.readGraph(), null, 2) }] }; +// Register create_relations tool +server.registerTool( + "create_relations", + { + title: "Create Relations", + description: "Create multiple new relations between entities in the knowledge graph. Relations should be in active voice", + inputSchema: { + relations: z.array(RelationSchema) + }, + outputSchema: { + relations: z.array(RelationSchema) + } + }, + async ({ relations }) => { + const result = await knowledgeGraphManager.createRelations(relations); + return { + content: [{ type: "text" as const, text: JSON.stringify(result, null, 2) }], + structuredContent: { relations: result } + }; } +); - if (!args) { - throw new Error(`No arguments provided for tool: ${name}`); +// Register add_observations tool +server.registerTool( + "add_observations", + { + title: "Add Observations", + description: "Add new observations to existing entities in the knowledge graph", + inputSchema: { + observations: z.array(z.object({ + entityName: z.string().describe("The name of the entity to add the observations to"), + contents: z.array(z.string()).describe("An array of observation contents to add") + })) + }, + outputSchema: { + results: z.array(z.object({ + entityName: z.string(), + addedObservations: z.array(z.string()) + })) + } + }, + async ({ observations }) => { + const result = await knowledgeGraphManager.addObservations(observations); + return { + content: [{ type: "text" as const, text: JSON.stringify(result, null, 2) }], + structuredContent: { results: result } + }; } +); - switch (name) { - case "create_entities": - return { content: [{ type: "text", text: JSON.stringify(await knowledgeGraphManager.createEntities(args.entities as Entity[]), null, 2) }] }; - case "create_relations": - return { content: [{ type: "text", text: JSON.stringify(await knowledgeGraphManager.createRelations(args.relations as Relation[]), null, 2) }] }; - case "add_observations": - return { content: [{ type: "text", text: JSON.stringify(await knowledgeGraphManager.addObservations(args.observations as { entityName: string; contents: string[] }[]), null, 2) }] }; - case "delete_entities": - await knowledgeGraphManager.deleteEntities(args.entityNames as string[]); - return { content: [{ type: "text", text: "Entities deleted successfully" }] }; - case "delete_observations": - await knowledgeGraphManager.deleteObservations(args.deletions as { entityName: string; observations: string[] }[]); - return { content: [{ type: "text", text: "Observations deleted successfully" }] }; - case 
"delete_relations": - await knowledgeGraphManager.deleteRelations(args.relations as Relation[]); - return { content: [{ type: "text", text: "Relations deleted successfully" }] }; - case "search_nodes": - return { content: [{ type: "text", text: JSON.stringify(await knowledgeGraphManager.searchNodes(args.query as string), null, 2) }] }; - case "open_nodes": - return { content: [{ type: "text", text: JSON.stringify(await knowledgeGraphManager.openNodes(args.names as string[]), null, 2) }] }; - default: - throw new Error(`Unknown tool: ${name}`); +// Register delete_entities tool +server.registerTool( + "delete_entities", + { + title: "Delete Entities", + description: "Delete multiple entities and their associated relations from the knowledge graph", + inputSchema: { + entityNames: z.array(z.string()).describe("An array of entity names to delete") + }, + outputSchema: { + success: z.boolean(), + message: z.string() + } + }, + async ({ entityNames }) => { + await knowledgeGraphManager.deleteEntities(entityNames); + return { + content: [{ type: "text" as const, text: "Entities deleted successfully" }], + structuredContent: { success: true, message: "Entities deleted successfully" } + }; } -}); +); + +// Register delete_observations tool +server.registerTool( + "delete_observations", + { + title: "Delete Observations", + description: "Delete specific observations from entities in the knowledge graph", + inputSchema: { + deletions: z.array(z.object({ + entityName: z.string().describe("The name of the entity containing the observations"), + observations: z.array(z.string()).describe("An array of observations to delete") + })) + }, + outputSchema: { + success: z.boolean(), + message: z.string() + } + }, + async ({ deletions }) => { + await knowledgeGraphManager.deleteObservations(deletions); + return { + content: [{ type: "text" as const, text: "Observations deleted successfully" }], + structuredContent: { success: true, message: "Observations deleted successfully" } + }; + } +); + +// Register delete_relations tool +server.registerTool( + "delete_relations", + { + title: "Delete Relations", + description: "Delete multiple relations from the knowledge graph", + inputSchema: { + relations: z.array(RelationSchema).describe("An array of relations to delete") + }, + outputSchema: { + success: z.boolean(), + message: z.string() + } + }, + async ({ relations }) => { + await knowledgeGraphManager.deleteRelations(relations); + return { + content: [{ type: "text" as const, text: "Relations deleted successfully" }], + structuredContent: { success: true, message: "Relations deleted successfully" } + }; + } +); + +// Register read_graph tool +server.registerTool( + "read_graph", + { + title: "Read Graph", + description: "Read the entire knowledge graph", + inputSchema: {}, + outputSchema: { + entities: z.array(EntitySchema), + relations: z.array(RelationSchema) + } + }, + async () => { + const graph = await knowledgeGraphManager.readGraph(); + return { + content: [{ type: "text" as const, text: JSON.stringify(graph, null, 2) }], + structuredContent: { ...graph } + }; + } +); + +// Register search_nodes tool +server.registerTool( + "search_nodes", + { + title: "Search Nodes", + description: "Search for nodes in the knowledge graph based on a query", + inputSchema: { + query: z.string().describe("The search query to match against entity names, types, and observation content") + }, + outputSchema: { + entities: z.array(EntitySchema), + relations: z.array(RelationSchema) + } + }, + async ({ query }) => { + const 
graph = await knowledgeGraphManager.searchNodes(query); + return { + content: [{ type: "text" as const, text: JSON.stringify(graph, null, 2) }], + structuredContent: { ...graph } + }; + } +); + +// Register open_nodes tool +server.registerTool( + "open_nodes", + { + title: "Open Nodes", + description: "Open specific nodes in the knowledge graph by their names", + inputSchema: { + names: z.array(z.string()).describe("An array of entity names to retrieve") + }, + outputSchema: { + entities: z.array(EntitySchema), + relations: z.array(RelationSchema) + } + }, + async ({ names }) => { + const graph = await knowledgeGraphManager.openNodes(names); + return { + content: [{ type: "text" as const, text: JSON.stringify(graph, null, 2) }], + structuredContent: { ...graph } + }; + } +); async function main() { // Initialize memory file path with backward compatibility diff --git a/src/memory/tsconfig.json b/src/memory/tsconfig.json index 4d33cae1df..d2d86555b0 100644 --- a/src/memory/tsconfig.json +++ b/src/memory/tsconfig.json @@ -1,11 +1,14 @@ { - "extends": "../../tsconfig.json", - "compilerOptions": { - "outDir": "./dist", - "rootDir": "." - }, - "include": [ - "./**/*.ts" - ] - } - \ No newline at end of file + "extends": "../../tsconfig.json", + "compilerOptions": { + "outDir": "./dist", + "rootDir": "." + }, + "include": [ + "./**/*.ts" + ], + "exclude": [ + "**/*.test.ts", + "vitest.config.ts" + ] +} diff --git a/src/sequentialthinking/__tests__/lib.test.ts b/src/sequentialthinking/__tests__/lib.test.ts index a97e41f5a0..2114c5ec18 100644 --- a/src/sequentialthinking/__tests__/lib.test.ts +++ b/src/sequentialthinking/__tests__/lib.test.ts @@ -22,107 +22,8 @@ describe('SequentialThinkingServer', () => { server = new SequentialThinkingServer(); }); - describe('processThought - validation', () => { - it('should reject input with missing thought', () => { - const input = { - thoughtNumber: 1, - totalThoughts: 3, - nextThoughtNeeded: true - }; - - const result = server.processThought(input); - expect(result.isError).toBe(true); - expect(result.content[0].text).toContain('Invalid thought'); - }); - - it('should reject input with non-string thought', () => { - const input = { - thought: 123, - thoughtNumber: 1, - totalThoughts: 3, - nextThoughtNeeded: true - }; - - const result = server.processThought(input); - expect(result.isError).toBe(true); - expect(result.content[0].text).toContain('Invalid thought'); - }); - - it('should reject input with missing thoughtNumber', () => { - const input = { - thought: 'Test thought', - totalThoughts: 3, - nextThoughtNeeded: true - }; - - const result = server.processThought(input); - expect(result.isError).toBe(true); - expect(result.content[0].text).toContain('Invalid thoughtNumber'); - }); - - it('should reject input with non-number thoughtNumber', () => { - const input = { - thought: 'Test thought', - thoughtNumber: '1', - totalThoughts: 3, - nextThoughtNeeded: true - }; - - const result = server.processThought(input); - expect(result.isError).toBe(true); - expect(result.content[0].text).toContain('Invalid thoughtNumber'); - }); - - it('should reject input with missing totalThoughts', () => { - const input = { - thought: 'Test thought', - thoughtNumber: 1, - nextThoughtNeeded: true - }; - - const result = server.processThought(input); - expect(result.isError).toBe(true); - expect(result.content[0].text).toContain('Invalid totalThoughts'); - }); - - it('should reject input with non-number totalThoughts', () => { - const input = { - thought: 'Test thought', - 
thoughtNumber: 1, - totalThoughts: '3', - nextThoughtNeeded: true - }; - - const result = server.processThought(input); - expect(result.isError).toBe(true); - expect(result.content[0].text).toContain('Invalid totalThoughts'); - }); - - it('should reject input with missing nextThoughtNeeded', () => { - const input = { - thought: 'Test thought', - thoughtNumber: 1, - totalThoughts: 3 - }; - - const result = server.processThought(input); - expect(result.isError).toBe(true); - expect(result.content[0].text).toContain('Invalid nextThoughtNeeded'); - }); - - it('should reject input with non-boolean nextThoughtNeeded', () => { - const input = { - thought: 'Test thought', - thoughtNumber: 1, - totalThoughts: 3, - nextThoughtNeeded: 'true' - }; - - const result = server.processThought(input); - expect(result.isError).toBe(true); - expect(result.content[0].text).toContain('Invalid nextThoughtNeeded'); - }); - }); + // Note: Input validation tests removed - validation now happens at the tool + // registration layer via Zod schemas before processThought is called describe('processThought - valid inputs', () => { it('should accept valid basic thought', () => { @@ -275,19 +176,6 @@ describe('SequentialThinkingServer', () => { }); describe('processThought - edge cases', () => { - it('should reject empty thought string', () => { - const input = { - thought: '', - thoughtNumber: 1, - totalThoughts: 1, - nextThoughtNeeded: false - }; - - const result = server.processThought(input); - expect(result.isError).toBe(true); - expect(result.content[0].text).toContain('Invalid thought'); - }); - it('should handle very long thought strings', () => { const input = { thought: 'a'.repeat(10000), @@ -349,25 +237,6 @@ describe('SequentialThinkingServer', () => { expect(result.content[0]).toHaveProperty('text'); }); - it('should return correct error structure on failure', () => { - const input = { - thought: 'Test', - thoughtNumber: 1, - totalThoughts: 1 - // missing nextThoughtNeeded - }; - - const result = server.processThought(input); - - expect(result).toHaveProperty('isError', true); - expect(result).toHaveProperty('content'); - expect(Array.isArray(result.content)).toBe(true); - - const errorData = JSON.parse(result.content[0].text); - expect(errorData).toHaveProperty('error'); - expect(errorData).toHaveProperty('status', 'failed'); - }); - it('should return valid JSON in response', () => { const input = { thought: 'Test thought', diff --git a/src/sequentialthinking/index.ts b/src/sequentialthinking/index.ts index 4e9da63a79..44af5c0e9d 100644 --- a/src/sequentialthinking/index.ts +++ b/src/sequentialthinking/index.ts @@ -1,17 +1,22 @@ #!/usr/bin/env node -import { Server } from "@modelcontextprotocol/sdk/server/index.js"; +import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js"; import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js"; -import { - CallToolRequestSchema, - ListToolsRequestSchema, - Tool, -} from "@modelcontextprotocol/sdk/types.js"; +import { z } from "zod"; import { SequentialThinkingServer } from './lib.js'; -const SEQUENTIAL_THINKING_TOOL: Tool = { - name: "sequentialthinking", - description: `A detailed tool for dynamic and reflective problem-solving through thoughts. 
+const server = new McpServer({
+  name: "sequential-thinking-server",
+  version: "0.2.0",
+});
+
+const thinkingServer = new SequentialThinkingServer();
+
+server.registerTool(
+  "sequentialthinking",
+  {
+    title: "Sequential Thinking",
+    description: `A detailed tool for dynamic and reflective problem-solving through thoughts.
 This tool helps analyze problems through a flexible thinking process that can adapt and evolve.
 Each thought can build on, question, or revise previous insights as understanding deepens.
 
@@ -37,13 +42,13 @@ Key features:
 
 Parameters explained:
 - thought: Your current thinking step, which can include:
-* Regular analytical steps
-* Revisions of previous thoughts
-* Questions about previous decisions
-* Realizations about needing more analysis
-* Changes in approach
-* Hypothesis generation
-* Hypothesis verification
+  * Regular analytical steps
+  * Revisions of previous thoughts
+  * Questions about previous decisions
+  * Realizations about needing more analysis
+  * Changes in approach
+  * Hypothesis generation
+  * Hypothesis verification
 - nextThoughtNeeded: True if you need more thinking, even if at what seemed like the end
 - thoughtNumber: Current number in sequence (can go beyond initial total if needed)
 - totalThoughts: Current estimate of thoughts needed (can be adjusted up/down)
@@ -65,85 +70,41 @@ You should:
 9. Repeat the process until satisfied with the solution
 10. Provide a single, ideally correct answer as the final output
 11. Only set next_thought_needed to false when truly done and a satisfactory answer is reached`,
-  inputSchema: {
-    type: "object",
-    properties: {
-      thought: {
-        type: "string",
-        description: "Your current thinking step"
-      },
-      nextThoughtNeeded: {
-        type: "boolean",
-        description: "Whether another thought step is needed"
-      },
-      thoughtNumber: {
-        type: "integer",
-        description: "Current thought number (numeric value, e.g., 1, 2, 3)",
-        minimum: 1
-      },
-      totalThoughts: {
-        type: "integer",
-        description: "Estimated total thoughts needed (numeric value, e.g., 5, 10)",
-        minimum: 1
-      },
-      isRevision: {
-        type: "boolean",
-        description: "Whether this revises previous thinking"
-      },
-      revisesThought: {
-        type: "integer",
-        description: "Which thought is being reconsidered",
-        minimum: 1
-      },
-      branchFromThought: {
-        type: "integer",
-        description: "Branching point thought number",
-        minimum: 1
-      },
-      branchId: {
-        type: "string",
-        description: "Branch identifier"
-      },
-      needsMoreThoughts: {
-        type: "boolean",
-        description: "If more thoughts are needed"
-      }
+    inputSchema: {
+      thought: z.string().describe("Your current thinking step"),
+      nextThoughtNeeded: z.boolean().describe("Whether another thought step is needed"),
+      thoughtNumber: z.number().int().min(1).describe("Current thought number (numeric value, e.g., 1, 2, 3)"),
+      totalThoughts: z.number().int().min(1).describe("Estimated total thoughts needed (numeric value, e.g., 5, 10)"),
+      isRevision: z.boolean().optional().describe("Whether this revises previous thinking"),
+      revisesThought: z.number().int().min(1).optional().describe("Which thought is being reconsidered"),
+      branchFromThought: z.number().int().min(1).optional().describe("Branching point thought number"),
+      branchId: z.string().optional().describe("Branch identifier"),
+      needsMoreThoughts: z.boolean().optional().describe("If more thoughts are needed")
     },
-    required: ["thought", "nextThoughtNeeded", "thoughtNumber", "totalThoughts"]
-  }
-};
-
-const server = new Server(
-  {
-    name: "sequential-thinking-server",
-    version: "0.2.0",
-  },
-  {
-    capabilities: {
-      tools: {},
+    outputSchema: {
+      thoughtNumber: z.number(),
+      totalThoughts: z.number(),
+      nextThoughtNeeded: z.boolean(),
+      branches: z.array(z.string()),
+      thoughtHistoryLength: z.number()
    },
-  }
-);
+  },
+  async (args) => {
+    const result = thinkingServer.processThought(args);
 
-const thinkingServer = new SequentialThinkingServer();
+    if (result.isError) {
+      return result;
+    }
 
-server.setRequestHandler(ListToolsRequestSchema, async () => ({
-  tools: [SEQUENTIAL_THINKING_TOOL],
-}));
+    // Parse the JSON response to get structured content
+    const parsedContent = JSON.parse(result.content[0].text);
 
-server.setRequestHandler(CallToolRequestSchema, async (request) => {
-  if (request.params.name === "sequentialthinking") {
-    return thinkingServer.processThought(request.params.arguments);
+    return {
+      content: result.content,
+      structuredContent: parsedContent
+    };
   }
-
-  return {
-    content: [{
-      type: "text",
-      text: `Unknown tool: ${request.params.name}`
-    }],
-    isError: true
-  };
-});
+);
 
 async function runServer() {
   const transport = new StdioServerTransport();
diff --git a/src/sequentialthinking/lib.ts b/src/sequentialthinking/lib.ts
index c5ee9cad3c..31a1098644 100644
--- a/src/sequentialthinking/lib.ts
+++ b/src/sequentialthinking/lib.ts
@@ -21,35 +21,6 @@ export class SequentialThinkingServer {
     this.disableThoughtLogging = (process.env.DISABLE_THOUGHT_LOGGING || "").toLowerCase() === "true";
   }
 
-  private validateThoughtData(input: unknown): ThoughtData {
-    const data = input as Record<string, unknown>;
-
-    if (!data.thought || typeof data.thought !== 'string') {
-      throw new Error('Invalid thought: must be a string');
-    }
-    if (!data.thoughtNumber || typeof data.thoughtNumber !== 'number') {
-      throw new Error('Invalid thoughtNumber: must be a number');
-    }
-    if (!data.totalThoughts || typeof data.totalThoughts !== 'number') {
-      throw new Error('Invalid totalThoughts: must be a number');
-    }
-    if (typeof data.nextThoughtNeeded !== 'boolean') {
-      throw new Error('Invalid nextThoughtNeeded: must be a boolean');
-    }
-
-    return {
-      thought: data.thought,
-      thoughtNumber: data.thoughtNumber,
-      totalThoughts: data.totalThoughts,
-      nextThoughtNeeded: data.nextThoughtNeeded,
-      isRevision: data.isRevision as boolean | undefined,
-      revisesThought: data.revisesThought as number | undefined,
-      branchFromThought: data.branchFromThought as number | undefined,
-      branchId: data.branchId as string | undefined,
-      needsMoreThoughts: data.needsMoreThoughts as boolean | undefined,
-    };
-  }
-
   private formatThought(thoughtData: ThoughtData): string {
     const { thoughtNumber, totalThoughts, thought, isRevision, revisesThought, branchFromThought, branchId } = thoughtData;
 
@@ -78,35 +49,35 @@ export class SequentialThinkingServer {
 └${border}┘`;
   }
 
-  public processThought(input: unknown): { content: Array<{ type: string; text: string }>; isError?: boolean } {
+  public processThought(input: ThoughtData): { content: Array<{ type: "text"; text: string }>; isError?: boolean } {
     try {
-      const validatedInput = this.validateThoughtData(input);
-
-      if (validatedInput.thoughtNumber > validatedInput.totalThoughts) {
-        validatedInput.totalThoughts = validatedInput.thoughtNumber;
+      // Validation happens at the tool registration layer via Zod
+      // Adjust totalThoughts if thoughtNumber exceeds it
+      if (input.thoughtNumber > input.totalThoughts) {
+        input.totalThoughts = input.thoughtNumber;
       }
 
-      this.thoughtHistory.push(validatedInput);
+      this.thoughtHistory.push(input);
 
-      if (validatedInput.branchFromThought && validatedInput.branchId) {
-        if (!this.branches[validatedInput.branchId]) {
-          this.branches[validatedInput.branchId] = [];
+      if (input.branchFromThought && input.branchId) {
+        if (!this.branches[input.branchId]) {
+          this.branches[input.branchId] = [];
         }
-        this.branches[validatedInput.branchId].push(validatedInput);
+        this.branches[input.branchId].push(input);
       }
 
       if (!this.disableThoughtLogging) {
-        const formattedThought = this.formatThought(validatedInput);
+        const formattedThought = this.formatThought(input);
         console.error(formattedThought);
       }
 
       return {
         content: [{
-          type: "text",
+          type: "text" as const,
           text: JSON.stringify({
-            thoughtNumber: validatedInput.thoughtNumber,
-            totalThoughts: validatedInput.totalThoughts,
-            nextThoughtNeeded: validatedInput.nextThoughtNeeded,
+            thoughtNumber: input.thoughtNumber,
+            totalThoughts: input.totalThoughts,
+            nextThoughtNeeded: input.nextThoughtNeeded,
             branches: Object.keys(this.branches),
             thoughtHistoryLength: this.thoughtHistory.length
           }, null, 2)
@@ -115,7 +86,7 @@ export class SequentialThinkingServer {
     } catch (error) {
       return {
         content: [{
-          type: "text",
+          type: "text" as const,
           text: JSON.stringify({
             error: error instanceof Error ? error.message : String(error),
             status: 'failed'
diff --git a/src/sequentialthinking/tsconfig.json b/src/sequentialthinking/tsconfig.json
index 2ce5843e0b..d2d86555b0 100644
--- a/src/sequentialthinking/tsconfig.json
+++ b/src/sequentialthinking/tsconfig.json
@@ -2,9 +2,13 @@
   "extends": "../../tsconfig.json",
   "compilerOptions": {
     "outDir": "./dist",
-    "rootDir": ".",
-    "moduleResolution": "NodeNext",
-    "module": "NodeNext"
+    "rootDir": "."
   },
-  "include": ["./**/*.ts"]
+  "include": [
+    "./**/*.ts"
+  ],
+  "exclude": [
+    "**/*.test.ts",
+    "vitest.config.ts"
+  ]
 }
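Note on exercising the refactor: with argument validation now handled by the Zod inputSchema at registerTool time, processThought accepts a plain ThoughtData object, so the class in lib.ts can be unit-tested without going through an MCP transport. The sketch below is illustrative only and is not part of the diff: the file name, the ./lib.js import path, and the use of vitest are assumptions suggested by the "**/*.test.ts" and "vitest.config.ts" entries added to the tsconfig exclude list. It asserts on the same JSON payload that the index.ts handler parses into structuredContent.

// Hypothetical sketch: src/sequentialthinking/lib.test.ts (excluded from the build by the tsconfig change above)
import { describe, it, expect, beforeEach } from 'vitest';
import { SequentialThinkingServer } from './lib.js'; // assumed ESM import path

describe('SequentialThinkingServer.processThought', () => {
  beforeEach(() => {
    // The constructor reads this flag; setting it keeps the formatted thought banner off stderr during tests.
    process.env.DISABLE_THOUGHT_LOGGING = 'true';
  });

  it('returns the JSON payload that the tool handler exposes as structuredContent', () => {
    const server = new SequentialThinkingServer();
    const result = server.processThought({
      thought: 'First step',
      thoughtNumber: 1,
      totalThoughts: 3,
      nextThoughtNeeded: true,
    });

    expect(result.isError).toBeUndefined();
    const payload = JSON.parse(result.content[0].text);
    expect(payload.thoughtNumber).toBe(1);
    expect(payload.branches).toEqual([]);
    expect(payload.thoughtHistoryLength).toBe(1);
  });

  it('bumps totalThoughts when thoughtNumber overruns the estimate', () => {
    const server = new SequentialThinkingServer();
    const result = server.processThought({
      thought: 'Went longer than planned',
      thoughtNumber: 4,
      totalThoughts: 3,
      nextThoughtNeeded: false,
    });

    const payload = JSON.parse(result.content[0].text);
    expect(payload.totalThoughts).toBe(4);
    expect(payload.nextThoughtNeeded).toBe(false);
  });
});

Running npx vitest run from src/sequentialthinking (assuming a vitest.config.ts like the one referenced in the exclude list) would be one way to exercise this against the refactored lib.ts.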