From 802648b81c09f72b477b59af10e4428398aef5d2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E8=B5=B5=E6=97=A5=E5=A4=A9?= Date: Fri, 10 Apr 2026 00:23:24 +0800 Subject: [PATCH 01/29] Refine CLI logging into concise Markdown output --- cli/src/commands/CleanCommand.ts | 29 +-- cli/src/commands/DryRunCleanCommand.ts | 28 +-- cli/src/commands/DryRunOutputCommand.ts | 22 +- cli/src/commands/ExecuteCommand.ts | 75 ++---- cli/src/commands/HelpCommand.ts | 77 ++++--- cli/src/commands/JsonOutputCommand.ts | 8 +- cli/src/commands/PluginsCommand.ts | 21 +- cli/src/commands/UnknownCommand.ts | 9 +- cli/src/commands/VersionCommand.ts | 4 +- cli/src/commands/execution-preflight.ts | 45 ++-- cli/src/commands/execution-routing.test.ts | 27 +-- cli/src/plugin-runtime.ts | 16 +- gui/scripts/generate-icons.ts | 12 +- gui/scripts/generate-routes.ts | 7 +- gui/scripts/run-tauri-tests.ts | 14 +- gui/src-tauri/src/commands.rs | 109 +++------ gui/src-tauri/tests/ipc_contract_property.rs | 37 ++- gui/src/api/bridge.property.test.ts | 30 +-- gui/src/api/bridge.ts | 7 +- gui/src/components/MarkdownLogBlock.tsx | 85 +++++++ gui/src/i18n/en-US.json | 6 +- gui/src/i18n/zh-CN.json | 6 +- gui/src/pages/LogsPage.tsx | 56 ++--- gui/src/utils/logFilter.property.test.ts | 48 ++-- gui/src/utils/logFilter.test.ts | 87 +++---- gui/src/utils/logFilter.ts | 36 +-- libraries/logger/src/index.test.ts | 11 +- libraries/logger/src/index.ts | 50 +--- libraries/logger/src/lib.rs | 230 +++++++------------ scripts/build-native.ts | 27 ++- scripts/build-quiet.ts | 9 +- scripts/cargo-test.ts | 10 +- scripts/copy-napi.ts | 39 +++- scripts/install-rust-deps.ts | 14 +- scripts/markdown-output.ts | 117 ++++++++++ scripts/postinstall.ts | 12 +- sdk/scripts/finalize-bundle.ts | 7 +- sdk/scripts/generate-schema.ts | 7 +- sdk/src/ConfigLoader.ts | 4 +- sdk/src/config.ts | 8 +- sdk/src/core/config/mod.rs | 4 +- sdk/src/diagnostics.ts | 41 ++-- sdk/src/runtime/cleanup.ts | 41 ++-- sdk/src/wsl-mirror-sync.ts | 14 +- 44 files 
changed, 771 insertions(+), 775 deletions(-) create mode 100644 gui/src/components/MarkdownLogBlock.tsx create mode 100644 scripts/markdown-output.ts diff --git a/cli/src/commands/CleanCommand.ts b/cli/src/commands/CleanCommand.ts index c6bcb2ba..8adcf436 100644 --- a/cli/src/commands/CleanCommand.ts +++ b/cli/src/commands/CleanCommand.ts @@ -10,33 +10,18 @@ export class CleanCommand implements Command { if (preflightResult != null) return preflightResult const {logger, outputPlugins, createCleanContext, collectedOutputContext} = ctx - logger.info('started', { - command: 'clean', - pluginCount: outputPlugins.length, - projectCount: collectedOutputContext.workspace.projects.length, - workspaceDir: collectedOutputContext.workspace.directory.path + logger.info('Running cleanup', { + plugins: outputPlugins.length, + projects: collectedOutputContext.workspace.projects.length, + workspace: collectedOutputContext.workspace.directory.path }) - logger.info('clean phase started', {phase: 'cleanup'}) const result = await performCleanup(outputPlugins, createCleanContext(false), logger) if (result.violations.length > 0 || result.conflicts.length > 0) { - logger.info('clean halted', { - phase: 'cleanup', - conflicts: result.conflicts.length, - violations: result.violations.length, - ...result.message != null ? {message: result.message} : {} - }) return {success: false, filesAffected: 0, dirsAffected: 0, ...result.message != null ? 
{message: result.message} : {}} } - logger.info('clean phase complete', { - phase: 'cleanup', - deletedFiles: result.deletedFiles, - deletedDirs: result.deletedDirs, - errors: result.errors.length - }) - logger.info('complete', { - command: 'clean', - filesAffected: result.deletedFiles, - dirsAffected: result.deletedDirs + logger.info('Cleanup complete', { + files: result.deletedFiles, + directories: result.deletedDirs }) return {success: true, filesAffected: result.deletedFiles, dirsAffected: result.deletedDirs} } diff --git a/cli/src/commands/DryRunCleanCommand.ts b/cli/src/commands/DryRunCleanCommand.ts index 90933b96..76389612 100644 --- a/cli/src/commands/DryRunCleanCommand.ts +++ b/cli/src/commands/DryRunCleanCommand.ts @@ -1,6 +1,6 @@ import type {Command, CommandContext, CommandResult} from './Command' import * as path from 'node:path' -import {collectAllPluginOutputs, collectDeletionTargets, logProtectedDeletionGuardError} from '@truenine/memory-sync-sdk' +import {collectDeletionTargets, logProtectedDeletionGuardError} from '@truenine/memory-sync-sdk' import {runExecutionPreflight} from './execution-preflight' export class DryRunCleanCommand implements Command { @@ -11,16 +11,10 @@ export class DryRunCleanCommand implements Command { if (preflightResult != null) return preflightResult const {logger, outputPlugins, createCleanContext} = ctx - logger.info('running clean pipeline', {command: 'dry-run-clean', dryRun: true}) - const cleanCtx = createCleanContext(true) - const outputs = await collectAllPluginOutputs(outputPlugins, cleanCtx) - logger.info('collected outputs for cleanup', { - dryRun: true, - projectDirs: outputs.projectDirs.length, - projectFiles: outputs.projectFiles.length, - globalDirs: outputs.globalDirs.length, - globalFiles: outputs.globalFiles.length + logger.info('Running cleanup preview', { + plugins: outputPlugins.length }) + const cleanCtx = createCleanContext(true) const {filesToDelete, dirsToDelete, emptyDirsToDelete, violations, 
excludedScanGlobs} = await collectDeletionTargets(outputPlugins, cleanCtx) const totalDirsToDelete = [...dirsToDelete, ...emptyDirsToDelete] @@ -35,16 +29,14 @@ export class DryRunCleanCommand implements Command { } } - for (const file of filesToDelete) logger.info('would delete file', {path: path.isAbsolute(file) ? file : path.resolve(file), dryRun: true}) + for (const file of filesToDelete) logger.info('Would remove file', {path: path.isAbsolute(file) ? file : path.resolve(file)}) for (const dir of [...totalDirsToDelete].sort((a, b) => b.length - a.length)) - { logger.info('would delete directory', {path: path.isAbsolute(dir) ? dir : path.resolve(dir), dryRun: true}) } + { logger.info('Would remove directory', {path: path.isAbsolute(dir) ? dir : path.resolve(dir)}) } - logger.info('clean complete', { - dryRun: true, - filesAffected: filesToDelete.length, - dirsAffected: totalDirsToDelete.length, - violations: 0, - excludedScanGlobs + logger.info('Cleanup preview complete', { + files: filesToDelete.length, + directories: totalDirsToDelete.length, + excludedGlobs: excludedScanGlobs.length }) return { diff --git a/cli/src/commands/DryRunOutputCommand.ts b/cli/src/commands/DryRunOutputCommand.ts index d5aeb416..a517c230 100644 --- a/cli/src/commands/DryRunOutputCommand.ts +++ b/cli/src/commands/DryRunOutputCommand.ts @@ -9,18 +9,21 @@ export class DryRunOutputCommand implements Command { const preflightResult = runExecutionPreflight(ctx, this.name) if (preflightResult != null) return preflightResult - const {logger, outputPlugins, createWriteContext} = ctx - logger.info('started', {command: 'dry-run-output', dryRun: true}) + const {logger, outputPlugins, createWriteContext, collectedOutputContext} = ctx + logger.info('Running dry run', { + plugins: outputPlugins.length, + projects: collectedOutputContext.workspace.projects.length, + workspace: collectedOutputContext.workspace.directory.path + }) const writeCtx = createWriteContext(true) const predeclaredOutputs = 
await collectOutputDeclarations(outputPlugins, writeCtx) const results = await executeDeclarativeWriteOutputs(outputPlugins, writeCtx, predeclaredOutputs) let totalFiles = 0 let totalDirs = 0 - for (const [pluginName, result] of results) { + for (const result of results.values()) { totalFiles += result.files.length totalDirs += result.dirs.length - logger.info('plugin result', {plugin: pluginName, files: result.files.length, dirs: result.dirs.length, dryRun: true}) } const wslMirrorResult = await syncWindowsConfigIntoWsl(outputPlugins, writeCtx, void 0, predeclaredOutputs) @@ -29,7 +32,16 @@ export class DryRunOutputCommand implements Command { } totalFiles += wslMirrorResult.mirroredFiles - logger.info('complete', {command: 'dry-run-output', totalFiles, totalDirs, dryRun: true}) + if (wslMirrorResult.mirroredFiles > 0 || wslMirrorResult.warnings.length > 0) { + logger.info('Prepared WSL mirror preview', { + files: wslMirrorResult.mirroredFiles, + warnings: wslMirrorResult.warnings.length + }) + } + logger.info('Dry run complete', { + files: totalFiles, + directories: totalDirs + }) return {success: true, filesAffected: totalFiles, dirsAffected: totalDirs, message: 'Dry-run complete, no files were written'} } } diff --git a/cli/src/commands/ExecuteCommand.ts b/cli/src/commands/ExecuteCommand.ts index 9fb046bc..31d85af5 100644 --- a/cli/src/commands/ExecuteCommand.ts +++ b/cli/src/commands/ExecuteCommand.ts @@ -10,46 +10,31 @@ export class ExecuteCommand implements Command { if (preflightResult != null) return preflightResult const {logger, outputPlugins, createCleanContext, createWriteContext, collectedOutputContext} = ctx - logger.info('started', { - command: 'execute', - pluginCount: outputPlugins.length, - projectCount: collectedOutputContext.workspace.projects.length, - workspaceDir: collectedOutputContext.workspace.directory.path + logger.info('Running sync', { + plugins: outputPlugins.length, + projects: collectedOutputContext.workspace.projects.length, + 
workspace: collectedOutputContext.workspace.directory.path }) const writeCtx = createWriteContext(false) - logger.info('execute phase started', {phase: 'collect-output-declarations'}) const predeclaredOutputs = await collectOutputDeclarations(outputPlugins, writeCtx) const declarationCount = [...predeclaredOutputs.values()] .reduce((total, declarations) => total + declarations.length, 0) - logger.info('execute phase complete', { - phase: 'collect-output-declarations', - pluginCount: predeclaredOutputs.size, - declarationCount + logger.info('Prepared output plan', { + plugins: predeclaredOutputs.size, + declarations: declarationCount }) - logger.info('execute phase started', {phase: 'cleanup-before-write'}) const cleanupResult = await performCleanup(outputPlugins, createCleanContext(false), logger, predeclaredOutputs) if (cleanupResult.violations.length > 0 || cleanupResult.conflicts.length > 0) { - logger.info('execute halted', { - phase: 'cleanup-before-write', - conflicts: cleanupResult.conflicts.length, - violations: cleanupResult.violations.length, - ...cleanupResult.message != null ? {message: cleanupResult.message} : {} - }) return {success: false, filesAffected: 0, dirsAffected: 0, ...cleanupResult.message != null ? 
{message: cleanupResult.message} : {}} } - logger.info('execute phase complete', { - phase: 'cleanup-before-write', - deletedFiles: cleanupResult.deletedFiles, - deletedDirs: cleanupResult.deletedDirs + logger.info('Removed stale generated files', { + files: cleanupResult.deletedFiles, + directories: cleanupResult.deletedDirs }) - logger.info('execute phase started', { - phase: 'write-output-files', - declarationCount - }) const results = await executeDeclarativeWriteOutputs(outputPlugins, writeCtx, predeclaredOutputs) let totalFiles = 0 @@ -63,45 +48,31 @@ export class ExecuteCommand implements Command { } } - logger.info('execute phase complete', { - phase: 'write-output-files', - pluginCount: results.size, - filesAffected: totalFiles, - dirsAffected: totalDirs, - writeErrors: writeErrors.length + logger.info('Wrote output files', { + plugins: results.size, + files: totalFiles, + directories: totalDirs }) if (writeErrors.length > 0) { - logger.info('execute halted', { - phase: 'write-output-files', - writeErrors: writeErrors.length - }) return {success: false, filesAffected: totalFiles, dirsAffected: totalDirs, message: writeErrors.join('\n')} } - logger.info('execute phase started', {phase: 'sync-wsl-mirrors'}) const wslMirrorResult = await syncWindowsConfigIntoWsl(outputPlugins, writeCtx, void 0, predeclaredOutputs) if (wslMirrorResult.errors.length > 0) { - logger.info('execute halted', { - phase: 'sync-wsl-mirrors', - mirroredFiles: wslMirrorResult.mirroredFiles, - errors: wslMirrorResult.errors.length - }) return {success: false, filesAffected: totalFiles, dirsAffected: totalDirs, message: wslMirrorResult.errors.join('\n')} } totalFiles += wslMirrorResult.mirroredFiles - logger.info('execute phase complete', { - phase: 'sync-wsl-mirrors', - mirroredFiles: wslMirrorResult.mirroredFiles, - warnings: wslMirrorResult.warnings.length, - errors: wslMirrorResult.errors.length - }) - logger.info('complete', { - command: 'execute', - pluginCount: results.size, - 
filesAffected: totalFiles, - dirsAffected: totalDirs + if (wslMirrorResult.mirroredFiles > 0 || wslMirrorResult.warnings.length > 0) { + logger.info('Synced WSL mirrors', { + files: wslMirrorResult.mirroredFiles, + warnings: wslMirrorResult.warnings.length + }) + } + logger.info('Sync complete', { + files: totalFiles, + directories: totalDirs }) return {success: true, filesAffected: totalFiles, dirsAffected: totalDirs} } diff --git a/cli/src/commands/HelpCommand.ts b/cli/src/commands/HelpCommand.ts index 3e36de1a..841b98e2 100644 --- a/cli/src/commands/HelpCommand.ts +++ b/cli/src/commands/HelpCommand.ts @@ -1,53 +1,60 @@ import type {Command, CommandContext, CommandResult} from './Command' +import process from 'node:process' import {getCliVersion} from './VersionCommand' const CLI_NAME = 'tnmsc' const HELP_TEXT = ` -${CLI_NAME} v${getCliVersion()} - Memory Sync CLI +# ${CLI_NAME} v${getCliVersion()} Synchronize AI memory and configuration files across projects. -USAGE: - ${CLI_NAME} Run the sync pipeline (default) - ${CLI_NAME} help Show this help message - ${CLI_NAME} version Show version information - ${CLI_NAME} dry-run Preview what would be written - ${CLI_NAME} clean Remove all generated files - ${CLI_NAME} clean --dry-run Preview what would be cleaned - -SUBCOMMANDS: - help Show this help message - version Show version information - dry-run Preview changes without writing files - clean Remove all generated output files and directories - -ALIASES: - ${CLI_NAME} --help, ${CLI_NAME} -h Same as '${CLI_NAME} help' - ${CLI_NAME} --version, ${CLI_NAME} -v Same as '${CLI_NAME} version' - ${CLI_NAME} clean -n Same as '${CLI_NAME} clean --dry-run' - -LOG LEVEL OPTIONS: - --trace Most verbose output - --debug Debug information - --info Standard information (default) - --warn Warnings only - --error Errors only - -CLEAN OPTIONS: - -n, --dry-run Preview cleanup without removing files - -CONFIGURATION: - Global user config lives at ~/.aindex/.tnmsc.json. 
- Edit that file directly, then use plugin.config.ts in your project root - for project-side plugin assembly and runtime overrides. +## Usage + +- \`${CLI_NAME}\` runs the sync pipeline. +- \`${CLI_NAME} help\` shows this help message. +- \`${CLI_NAME} version\` shows the CLI version. +- \`${CLI_NAME} dry-run\` previews what would be written. +- \`${CLI_NAME} clean\` removes generated files. +- \`${CLI_NAME} clean --dry-run\` previews what would be cleaned. + +## Subcommands + +- \`help\` shows this help message. +- \`version\` shows version information. +- \`dry-run\` previews changes without writing files. +- \`clean\` removes generated output files and directories. + +## Aliases + +- \`${CLI_NAME} --help\` and \`${CLI_NAME} -h\` are the same as \`${CLI_NAME} help\`. +- \`${CLI_NAME} --version\` and \`${CLI_NAME} -v\` are the same as \`${CLI_NAME} version\`. +- \`${CLI_NAME} clean -n\` is the same as \`${CLI_NAME} clean --dry-run\`. + +## Log Controls + +- \`--trace\` shows the most detail. +- \`--debug\` shows debug detail. +- \`--info\` shows key progress and results. +- \`--warn\` shows warnings only. +- \`--error\` shows errors only. + +## Clean Option + +- \`-n\`, \`--dry-run\` previews cleanup without removing files. 
+ +## Configuration + +- Global user config: \`~/.aindex/.tnmsc.json\` +- Project runtime assembly: \`plugin.config.ts\` `.trim() export class HelpCommand implements Command { readonly name = 'help' async execute(ctx: CommandContext): Promise { - ctx.logger.info(HELP_TEXT) + void ctx + process.stdout.write(`${HELP_TEXT}\n`) return {success: true, filesAffected: 0, dirsAffected: 0, message: 'Help displayed'} } } diff --git a/cli/src/commands/JsonOutputCommand.ts b/cli/src/commands/JsonOutputCommand.ts index c4d61637..9715686b 100644 --- a/cli/src/commands/JsonOutputCommand.ts +++ b/cli/src/commands/JsonOutputCommand.ts @@ -1,20 +1,22 @@ -import type {LoggerDiagnosticRecord} from '@truenine/memory-sync-sdk' import type {Command, CommandContext, CommandResult} from './Command' import process from 'node:process' import { clearBufferedDiagnostics, drainBufferedDiagnostics, + type LoggerDiagnosticRecord, partitionBufferedDiagnostics } from '@truenine/memory-sync-sdk' +type PublicLoggerDiagnosticRecord = Omit + interface JsonCommandResult { readonly success: boolean readonly filesAffected: number readonly dirsAffected: number readonly message?: string readonly pluginResults: readonly [] - readonly warnings: readonly LoggerDiagnosticRecord[] - readonly errors: readonly LoggerDiagnosticRecord[] + readonly warnings: readonly PublicLoggerDiagnosticRecord[] + readonly errors: readonly PublicLoggerDiagnosticRecord[] } export class JsonOutputCommand implements Command { diff --git a/cli/src/commands/PluginsCommand.ts b/cli/src/commands/PluginsCommand.ts index 5d12cd75..cdd66b65 100644 --- a/cli/src/commands/PluginsCommand.ts +++ b/cli/src/commands/PluginsCommand.ts @@ -5,7 +5,7 @@ export class PluginsCommand implements Command { readonly name = 'plugins' async execute(ctx: CommandContext): Promise { - const {logger, outputPlugins} = ctx + const {outputPlugins} = ctx const pluginInfos: JsonPluginInfo[] = [] for (const plugin of outputPlugins) { @@ -17,8 +17,23 @@ export class 
PluginsCommand implements Command { }) } - process.stdout.write(`${JSON.stringify(pluginInfos)}\n`) - logger.info('plugins listed', {count: pluginInfos.length}) + if (process.argv.includes('--bridge-json')) { + process.stdout.write(`${JSON.stringify(pluginInfos)}\n`) + } else { + const lines = ['# Registered plugins', ''] + if (pluginInfos.length === 0) { + lines.push('- No plugins are currently registered.') + } else { + for (const plugin of pluginInfos) { + const dependencySuffix = plugin.dependencies.length > 0 + ? ` (depends on: ${plugin.dependencies.join(', ')})` + : '' + lines.push(`- ${plugin.name}${dependencySuffix}`) + } + } + process.stdout.write(`${lines.join('\n')}\n`) + } + return {success: true, filesAffected: 0, dirsAffected: 0, message: `Listed ${pluginInfos.length} plugin(s)`} } } diff --git a/cli/src/commands/UnknownCommand.ts b/cli/src/commands/UnknownCommand.ts index c8ec4a05..9cf49028 100644 --- a/cli/src/commands/UnknownCommand.ts +++ b/cli/src/commands/UnknownCommand.ts @@ -10,14 +10,15 @@ export class UnknownCommand implements Command { ctx.logger.error( buildUsageDiagnostic({ code: 'UNKNOWN_COMMAND', - title: `Unknown tnmsc command: ${this.unknownCmd}`, - rootCause: diagnosticLines(`tnmsc does not recognize the "${this.unknownCmd}" subcommand.`), - exactFix: diagnosticLines('Run `tnmsc help` and invoke one of the supported commands.'), + title: 'Command not found', + rootCause: diagnosticLines( + `tnmsc does not recognize "${this.unknownCmd}".` + ), + exactFix: diagnosticLines('Run `tnmsc help`, then retry with a supported command.'), possibleFixes: [diagnosticLines('Check the command spelling and remove unsupported aliases or flags.')], details: {command: this.unknownCmd} }) ) - ctx.logger.info('run "tnmsc help" for available commands') return {success: false, filesAffected: 0, dirsAffected: 0, message: `Unknown command: ${this.unknownCmd}`} } } diff --git a/cli/src/commands/VersionCommand.ts b/cli/src/commands/VersionCommand.ts index 
c49ab789..97121690 100644 --- a/cli/src/commands/VersionCommand.ts +++ b/cli/src/commands/VersionCommand.ts @@ -1,4 +1,5 @@ import type {Command, CommandContext, CommandResult} from './Command' +import process from 'node:process' const CLI_NAME = 'tnmsc' @@ -10,7 +11,8 @@ export class VersionCommand implements Command { readonly name = 'version' async execute(ctx: CommandContext): Promise { - ctx.logger.info(`${CLI_NAME} v${getCliVersion()}`) + void ctx + process.stdout.write(`# ${CLI_NAME} v${getCliVersion()}\n`) return {success: true, filesAffected: 0, dirsAffected: 0, message: 'Version displayed'} } } diff --git a/cli/src/commands/execution-preflight.ts b/cli/src/commands/execution-preflight.ts index 5e100e9b..ed004f42 100644 --- a/cli/src/commands/execution-preflight.ts +++ b/cli/src/commands/execution-preflight.ts @@ -16,11 +16,9 @@ function logExternalProjectGroups(ctx: CommandContext): void { for (const series of SERIES_ORDER) { const projects = ctx.executionPlan.projectsBySeries[series] if (projects.length === 0) continue - ctx.logger.info('external execution project group', { - phase: 'execution-scope', - scope: 'external', + ctx.logger.debug('External execution includes project group', { series, - projectCount: projects.length, + count: projects.length, projects: projects.map(project => project.name) }) } @@ -31,19 +29,11 @@ function logProjectSummary( commandName: string, project: ExecutionPlanProjectSummary ): void { - ctx.logger.info('execution scope resolved to project', { - phase: 'execution-scope', + ctx.logger.info('Running against one managed project', { command: commandName, - scope: 'project', - cwd: ctx.executionPlan.cwd, - workspaceDir: ctx.executionPlan.workspaceDir, - projectName: project.name, - ...project.series != null ? 
{projectSeries: project.series} : {} - }) - ctx.logger.info('project-scoped execution only targets the matched project and global outputs', { - phase: 'execution-scope', - command: commandName, - projectName: project.name + project: project.name, + ...project.series != null ? {series: project.series} : {}, + workspace: ctx.executionPlan.workspaceDir }) } @@ -55,16 +45,15 @@ export function runExecutionPreflight( case 'workspace': ctx.logger.warn(buildDiagnostic({ code: 'EXECUTION_SCOPE_WORKSPACE', - title: 'Execution is limited to workspace-level outputs', + title: 'Running from the workspace root', rootCause: diagnosticLines( - `tnmsc resolved the current execution directory "${ctx.executionPlan.cwd}" to the workspace root.`, - 'This run will sync or clean only workspace-level outputs plus global outputs to improve performance.' + `This run will only touch workspace-level outputs and global outputs.`, + `Current directory: ${ctx.executionPlan.cwd}` ), exactFix: diagnosticLines( - 'Run tnmsc from a managed project directory to target one project, or from outside the workspace to process every managed project.' + 'Run tnmsc from a managed project directory to target one project, or from outside the workspace to include every managed project.' ), details: { - phase: 'execution-scope', command: commandName, scope: 'workspace', cwd: ctx.executionPlan.cwd, @@ -78,16 +67,15 @@ export function runExecutionPreflight( case 'external': ctx.logger.warn(buildDiagnostic({ code: 'EXECUTION_SCOPE_EXTERNAL', - title: 'Execution will process the full workspace and all managed projects', + title: 'Running outside the workspace', rootCause: diagnosticLines( - `tnmsc resolved the current execution directory "${ctx.executionPlan.cwd}" as external to workspace "${ctx.executionPlan.workspaceDir}".`, - 'This run may take longer because it will process workspace-level outputs, all managed projects, and global outputs.' 
+ `This run will process the workspace, every managed project, and global outputs.`, + `Current directory: ${ctx.executionPlan.cwd}` ), exactFix: diagnosticLines( `Run tnmsc from "${ctx.executionPlan.workspaceDir}" for workspace-only execution, or from a managed project directory for project-only execution.` ), details: { - phase: 'execution-scope', command: commandName, scope: 'external', cwd: ctx.executionPlan.cwd, @@ -100,16 +88,15 @@ export function runExecutionPreflight( const message = buildUnsupportedMessage(ctx) ctx.logger.error(buildDiagnostic({ code: 'EXECUTION_SCOPE_UNSUPPORTED', - title: 'Execution directory is inside the workspace but not managed by tnmsc', + title: 'This directory is not a managed tnmsc target', rootCause: diagnosticLines( - `tnmsc resolved "${ctx.executionPlan.cwd}" inside workspace "${ctx.executionPlan.workspaceDir}", but the directory is not the workspace root and does not belong to any managed project.`, - 'Running from this location is unsupported because tnmsc cannot map the request to a workspace-level or project-level execution target.' + `tnmsc cannot map "${ctx.executionPlan.cwd}" to the workspace root or any managed project.`, + `Workspace: ${ctx.executionPlan.workspaceDir}` ), exactFix: diagnosticLines( 'Run tnmsc from the workspace root, from a managed project directory, or from outside the workspace.' 
), details: { - phase: 'execution-scope', command: commandName, scope: 'unsupported', cwd: ctx.executionPlan.cwd, diff --git a/cli/src/commands/execution-routing.test.ts b/cli/src/commands/execution-routing.test.ts index 05ecd620..db2e7cbc 100644 --- a/cli/src/commands/execution-routing.test.ts +++ b/cli/src/commands/execution-routing.test.ts @@ -17,14 +17,12 @@ function createEmptyProjectsBySeries() { } const { - collectAllPluginOutputsMock, collectDeletionTargetsMock, collectOutputDeclarationsMock, executeDeclarativeWriteOutputsMock, performCleanupMock, syncWindowsConfigIntoWslMock } = vi.hoisted(() => ({ - collectAllPluginOutputsMock: vi.fn(), collectDeletionTargetsMock: vi.fn(), collectOutputDeclarationsMock: vi.fn(), executeDeclarativeWriteOutputsMock: vi.fn(), @@ -37,7 +35,6 @@ vi.mock('@truenine/memory-sync-sdk', async importOriginal => { return { ...actual, - collectAllPluginOutputs: collectAllPluginOutputsMock, collectDeletionTargets: collectDeletionTargetsMock, collectOutputDeclarations: collectOutputDeclarationsMock, executeDeclarativeWriteOutputs: executeDeclarativeWriteOutputsMock, @@ -48,12 +45,14 @@ vi.mock('@truenine/memory-sync-sdk', async importOriginal => { function createBaseContext(executionPlan: ExecutionPlan): { readonly ctx: CommandContext + readonly debugSpy: ReturnType readonly infoSpy: ReturnType readonly warnSpy: ReturnType readonly errorSpy: ReturnType } { const workspaceDir = executionPlan.workspaceDir const logger = createLogger('execution-routing-test', 'debug') + const debugSpy = vi.spyOn(logger, 'debug') const infoSpy = vi.spyOn(logger, 'info') const warnSpy = vi.spyOn(logger, 'warn') const errorSpy = vi.spyOn(logger, 'error') @@ -97,6 +96,7 @@ function createBaseContext(executionPlan: ExecutionPlan): { createCleanContext, createWriteContext } as unknown as CommandContext, + debugSpy, infoSpy, warnSpy, errorSpy @@ -160,13 +160,13 @@ describe('execution-aware command routing', () => { expect(result.success).toBe(true) 
expect(performCleanupMock).toHaveBeenCalledTimes(1) expect(infoSpy.mock.calls).toEqual(expect.arrayContaining([ - ['execution scope resolved to project', expect.objectContaining({projectName: 'plugin-one', projectSeries: 'ext'})] + ['Running against one managed project', expect.objectContaining({project: 'plugin-one', series: 'ext'})] ])) }) it('logs external project groups before running dry-run clean', async () => { const workspaceDir = path.resolve('/tmp/tnmsc-dry-run-clean-external') - const {ctx, infoSpy, warnSpy} = createBaseContext({ + const {ctx, debugSpy, warnSpy} = createBaseContext({ scope: 'external', cwd: path.resolve('/tmp/outside-workspace'), workspaceDir, @@ -177,12 +177,6 @@ describe('execution-aware command routing', () => { softwares: [{name: 'tool-one', rootDir: path.join(workspaceDir, 'tool-one'), series: 'softwares'}] } }) - collectAllPluginOutputsMock.mockResolvedValue({ - projectDirs: [], - projectFiles: [], - globalDirs: [], - globalFiles: [] - }) collectDeletionTargetsMock.mockResolvedValue({ filesToDelete: [], dirsToDelete: [], @@ -195,15 +189,14 @@ describe('execution-aware command routing', () => { const result = await new DryRunCleanCommand().execute(ctx) expect(result.success).toBe(true) - expect(collectAllPluginOutputsMock).toHaveBeenCalledTimes(1) expect(collectDeletionTargetsMock).toHaveBeenCalledTimes(1) expect(warnSpy.mock.calls).toEqual(expect.arrayContaining([ - [expect.objectContaining({code: 'EXECUTION_SCOPE_EXTERNAL', title: 'Execution will process the full workspace and all managed projects'})] + [expect.objectContaining({code: 'EXECUTION_SCOPE_EXTERNAL', title: 'Running outside the workspace'})] ])) - expect(infoSpy.mock.calls).toEqual(expect.arrayContaining([ - ['external execution project group', expect.objectContaining({series: 'app', projects: ['app-one']})], - ['external execution project group', expect.objectContaining({series: 'ext', projects: ['plugin-one']})], - ['external execution project group', 
expect.objectContaining({series: 'softwares', projects: ['tool-one']})] + expect(debugSpy.mock.calls).toEqual(expect.arrayContaining([ + ['External execution includes project group', expect.objectContaining({series: 'app', projects: ['app-one']})], + ['External execution includes project group', expect.objectContaining({series: 'ext', projects: ['plugin-one']})], + ['External execution includes project group', expect.objectContaining({series: 'softwares', projects: ['tool-one']})] ])) }) }) diff --git a/cli/src/plugin-runtime.ts b/cli/src/plugin-runtime.ts index 6243532b..7c82f624 100644 --- a/cli/src/plugin-runtime.ts +++ b/cli/src/plugin-runtime.ts @@ -89,7 +89,7 @@ async function main(): Promise { if (bridgeJson) setGlobalLogLevel('silent') const logger = createLogger('PluginRuntime') - logger.info('runtime bootstrap started', {subcommand, bridgeJson, dryRun}) + logger.debug('Runtime bootstrap ready', {subcommand, bridgeJson, dryRun}) const userPluginConfig = await createDefaultPluginConfig( process.argv, @@ -103,15 +103,15 @@ async function main(): Promise { const {context, outputPlugins, userConfigOptions, executionPlan} = userPluginConfig - logger.info('runtime configuration resolved', { + logger.debug('Runtime configuration resolved', { command: command.name, - pluginCount: outputPlugins.length, - projectCount: context.workspace.projects.length, - workspaceDir: context.workspace.directory.path, + plugins: outputPlugins.length, + projects: context.workspace.projects.length, + workspace: context.workspace.directory.path, ...context.aindexDir != null ? 
{aindexDir: context.aindexDir} : {} }) const runtimeTargets = discoverOutputRuntimeTargets(logger) - logger.info('runtime targets discovered', { + logger.debug('Runtime targets discovered', { command: command.name, jetbrainsCodexDirs: runtimeTargets.jetbrainsCodexDirs.length }) @@ -141,9 +141,9 @@ async function main(): Promise { createCleanContext, createWriteContext } - logger.info('command dispatch started', {command: command.name}) + logger.debug('Dispatching command', {command: command.name}) const result = await command.execute(commandCtx) - logger.info('command dispatch complete', { + logger.debug('Command finished', { command: command.name, success: result.success, filesAffected: result.filesAffected, diff --git a/gui/scripts/generate-icons.ts b/gui/scripts/generate-icons.ts index c408eb76..2e488508 100644 --- a/gui/scripts/generate-icons.ts +++ b/gui/scripts/generate-icons.ts @@ -1,6 +1,9 @@ import { execSync } from 'node:child_process' import { dirname, join } from 'node:path' import { fileURLToPath } from 'node:url' +import markdownOutput from '../../scripts/markdown-output' + +const {writeError, writeMarkdownBlock} = markdownOutput const __dirname = dirname(fileURLToPath(import.meta.url)) const rootDir = join(__dirname, '..') @@ -17,9 +20,14 @@ async function main() { encoding: 'utf-8', } ) - console.log('✓ Icons generated successfully') + writeMarkdownBlock('Icon generation complete', { + source: sourceIcon, + output: iconsDir, + }) } catch (error) { - console.error('✗ Failed to generate icons') + writeError('Icon generation failed', { + error: error instanceof Error ? 
error.message : String(error), + }) process.exit(1) } } diff --git a/gui/scripts/generate-routes.ts b/gui/scripts/generate-routes.ts index 1a30e010..c882d80a 100644 --- a/gui/scripts/generate-routes.ts +++ b/gui/scripts/generate-routes.ts @@ -1,6 +1,9 @@ #!/usr/bin/env tsx import { Generator, getConfig } from '@tanstack/router-generator' import { resolve } from 'node:path' +import markdownOutput from '../../scripts/markdown-output' + +const {writeMarkdownBlock} = markdownOutput const root = resolve(import.meta.dirname, '..') @@ -13,4 +16,6 @@ const config = await getConfig({ const gen = new Generator({ config, root }) await gen.run() -console.log('[generate-routes] routeTree.gen.ts updated') +writeMarkdownBlock('Route tree updated', { + output: resolve(root, 'src/routeTree.gen.ts'), +}) diff --git a/gui/scripts/run-tauri-tests.ts b/gui/scripts/run-tauri-tests.ts index 3647ab2b..144091a3 100644 --- a/gui/scripts/run-tauri-tests.ts +++ b/gui/scripts/run-tauri-tests.ts @@ -1,4 +1,7 @@ import {spawnSync} from 'node:child_process' +import markdownOutput from '../../scripts/markdown-output' + +const {writeError, writeWarning} = markdownOutput function cargoAvailable(): boolean { const result = spawnSync('cargo', ['--version'], { @@ -12,8 +15,9 @@ if (!cargoAvailable()) { // Skip Tauri tests when Rust toolchain is not installed locally so that // JS/Vitest tests can still pass. CI or dev machines with cargo installed // will still run the full `test:tauri` suite. 
- // eslint-disable-next-line no-console - console.warn('[memory-sync-gui] cargo not found on PATH, skipping Tauri tests (test:tauri).') + writeWarning('Skipping Tauri tests', { + reason: 'cargo is not available on PATH.', + }) process.exit(0) } @@ -23,10 +27,10 @@ const child = spawnSync('pnpm', ['run', 'test:tauri'], { }) if (child.error != null) { - // eslint-disable-next-line no-console - console.error('[memory-sync-gui] Failed to run pnpm test:tauri:', child.error) + writeError('Failed to run `pnpm test:tauri`', { + error: child.error.message, + }) process.exit(1) } process.exit(child.status ?? 1) - diff --git a/gui/src-tauri/src/commands.rs b/gui/src-tauri/src/commands.rs index 991e01ae..2c446642 100644 --- a/gui/src-tauri/src/commands.rs +++ b/gui/src-tauri/src/commands.rs @@ -8,7 +8,7 @@ use std::path::{Path, PathBuf}; use std::process::Command as StdCommand; use serde::{Deserialize, Serialize}; -use serde_json::{Map, Value}; +use serde_json::Value; use tnmsc::core::config as core_config; const PRIMARY_SOURCE_MDX_EXTENSION: &str = ".src.mdx"; @@ -67,10 +67,10 @@ pub struct PluginExecutionResult { #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct LogEntry { - pub timestamp: String, - pub level: String, - pub logger: String, - pub payload: serde_json::Value, + pub stream: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub source: Option, + pub markdown: String, } #[derive(Debug, Clone, Deserialize)] @@ -144,8 +144,7 @@ pub fn clean_outputs(cwd: String, dry_run: bool) -> Result Result, String> { let args: Vec<&str> = command.split_whitespace().collect(); @@ -153,12 +152,9 @@ pub fn get_logs(cwd: String, command: String) -> Result, String> { let extra_args: Vec<&str> = args.iter().skip(1).copied().collect(); let result = tnmsc::run_bridge_command(subcommand, Path::new(&cwd), &extra_args) .map_err(|e| e.to_string())?; - let logs = parse_log_lines(&result.stderr); - if logs.is_empty() { - 
Ok(parse_log_lines(&result.stdout)) - } else { - Ok(logs) - } + let mut logs = parse_log_lines(&result.stdout, "stdout"); + logs.extend(parse_log_lines(&result.stderr, "stderr")); + Ok(logs) } fn parse_pipeline_result(raw: &str, command: &str, dry_run: bool) -> Result { @@ -219,86 +215,45 @@ fn extract_diagnostic_message(diagnostic: &Value) -> Option { Some(format!("[{code}] {title}")) } -/// Parse markdown-style log output into lightweight GUI log entries. -fn parse_log_lines(raw: &str) -> Vec { +/// Parse markdown log output into lightweight GUI log entries. +fn parse_log_lines(raw: &str, stream: &str) -> Vec { let mut entries = Vec::new(); - let mut current: Option = None; + let mut current: Vec = Vec::new(); + let mut saw_markdown_record = false; for raw_line in raw.lines() { let line = raw_line.trim_end(); - if let Some((level, logger, message)) = parse_log_header(line) { - if let Some(entry) = current.take() { - entries.push(entry); - } - - let mut payload = Map::new(); - if let Some(message) = message { - payload.insert("message".to_string(), Value::String(message)); + if line.starts_with("### ") { + if !current.is_empty() { + entries.push(LogEntry { + stream: stream.to_string(), + source: None, + markdown: current.join("\n"), + }); + current.clear(); } - - current = Some(LogEntry { - timestamp: String::new(), - level, - logger, - payload: Value::Object(payload), - }); + saw_markdown_record = true; + current.push(line.to_string()); continue; } - if let Some(entry) = current.as_mut() { - append_log_body_line(&mut entry.payload, line); + if !current.is_empty() || !line.trim().is_empty() || !saw_markdown_record { + current.push(line.to_string()); } } - if let Some(entry) = current.take() { - entries.push(entry); + if !current.is_empty() { + entries.push(LogEntry { + stream: stream.to_string(), + source: None, + markdown: current.join("\n").trim().to_string(), + }); } + entries.retain(|entry| !entry.markdown.trim().is_empty()); entries } -fn 
parse_log_header(line: &str) -> Option<(String, String, Option)> { - if !line.starts_with("**") { - return None; - } - - let remainder = line.strip_prefix("**")?; - let level_end = remainder.find("**")?; - let level = remainder[..level_end].trim().to_string(); - let after_level = remainder[level_end + 2..].trim_start(); - let logger_start = after_level.find('`')?; - let after_logger_start = &after_level[logger_start + 1..]; - let logger_end = after_logger_start.find('`')?; - let logger = after_logger_start[..logger_end].to_string(); - let message = after_logger_start[logger_end + 1..].trim(); - - Some(( - level, - logger, - if message.is_empty() { - None - } else { - Some(message.to_string()) - }, - )) -} - -fn append_log_body_line(payload: &mut Value, line: &str) { - let object = match payload { - Value::Object(object) => object, - _ => return, - }; - - let entry = object - .entry("body".to_string()) - .or_insert_with(|| Value::Array(Vec::new())); - if let Value::Array(lines) = entry - && !line.trim().is_empty() - { - lines.push(Value::String(line.trim().to_string())); - } -} - /// Resolve the canonical global config file path. 
fn resolve_global_config_path() -> Result { let home = dirs::home_dir().ok_or("Cannot determine home directory")?; diff --git a/gui/src-tauri/tests/ipc_contract_property.rs b/gui/src-tauri/tests/ipc_contract_property.rs index 34312d91..35854806 100644 --- a/gui/src-tauri/tests/ipc_contract_property.rs +++ b/gui/src-tauri/tests/ipc_contract_property.rs @@ -28,14 +28,16 @@ fn arb_plugin_execution_result() -> impl Strategy } fn arb_log_entry() -> impl Strategy { - (any::(), any::(), any::()).prop_map(|(timestamp, level, logger)| { - LogEntry { - timestamp, - level, - logger, - payload: serde_json::Value::Null, - } - }) + ( + prop::sample::select(vec!["stdout".to_string(), "stderr".to_string()]), + prop::option::of(any::()), + any::(), + ) + .prop_map(|(stream, source, markdown)| LogEntry { + stream, + source, + markdown, + }) } fn arb_pipeline_result() -> impl Strategy { @@ -188,16 +190,11 @@ proptest! { .expect("serialised JSON must be valid"); let obj = val.as_object().expect("LogEntry JSON must be an object"); - prop_assert!(obj.contains_key("timestamp"), "JSON must contain 'timestamp'"); - prop_assert!(obj["timestamp"].is_string(), "'timestamp' must be a string"); - - prop_assert!(obj.contains_key("level"), "JSON must contain 'level'"); - prop_assert!(obj["level"].is_string(), "'level' must be a string"); - - prop_assert!(obj.contains_key("logger"), "JSON must contain 'logger'"); - prop_assert!(obj["logger"].is_string(), "'logger' must be a string"); + prop_assert!(obj.contains_key("stream"), "JSON must contain 'stream'"); + prop_assert!(obj["stream"].is_string(), "'stream' must be a string"); - prop_assert!(obj.contains_key("payload"), "JSON must contain 'payload'"); + prop_assert!(obj.contains_key("markdown"), "JSON must contain 'markdown'"); + prop_assert!(obj["markdown"].is_string(), "'markdown' must be a string"); } /// Round-trip: deserialise(serialise(LogEntry)) == original. @@ -210,8 +207,8 @@ proptest! 
{ let restored: LogEntry = serde_json::from_str(&json) .expect("LogEntry must deserialise from its own JSON"); - prop_assert_eq!(entry.timestamp, restored.timestamp); - prop_assert_eq!(entry.level, restored.level); - prop_assert_eq!(entry.logger, restored.logger); + prop_assert_eq!(entry.stream, restored.stream); + prop_assert_eq!(entry.source, restored.source); + prop_assert_eq!(entry.markdown, restored.markdown); } } diff --git a/gui/src/api/bridge.property.test.ts b/gui/src/api/bridge.property.test.ts index 639829ba..7181da12 100644 --- a/gui/src/api/bridge.property.test.ts +++ b/gui/src/api/bridge.property.test.ts @@ -20,15 +20,10 @@ const arbPluginExecutionResult: fc.Arbitrary = fc.record( dryRun: fc.boolean(), }) -// Use integer ms in a safe range to avoid Invalid Date during shrinking -const MIN_TS = new Date('2000-01-01T00:00:00.000Z').getTime() -const MAX_TS = new Date('2099-12-31T23:59:59.999Z').getTime() - const arbLogEntry: fc.Arbitrary = fc.record({ - timestamp: fc.integer({ min: MIN_TS, max: MAX_TS }).map((ms) => new Date(ms).toISOString()), - level: fc.constantFrom('info', 'warn', 'error', 'debug', 'verbose'), - logger: fc.string({ minLength: 1, maxLength: 64 }), - payload: fc.oneof(fc.string(), fc.integer(), fc.boolean(), fc.constant(null)), + stream: fc.constantFrom('stdout', 'stderr'), + source: fc.option(fc.string({ minLength: 1, maxLength: 64 }), { nil: undefined }), + markdown: fc.string({ minLength: 0, maxLength: 400 }), }) const arbPipelineResult: fc.Arbitrary = fc @@ -122,10 +117,8 @@ describe('LogEntry interface field integrity', () => { fc.property(arbLogEntry, (entry) => { const parsed = JSON.parse(JSON.stringify(entry)) as Record - expect(typeof parsed['timestamp']).toBe('string') - expect(typeof parsed['level']).toBe('string') - expect(typeof parsed['logger']).toBe('string') - expect('payload' in parsed).toBe(true) + expect(typeof parsed['stream']).toBe('string') + expect(typeof parsed['markdown']).toBe('string') }), { numRuns: 200 }, ) 
@@ -136,10 +129,9 @@ describe('LogEntry interface field integrity', () => { fc.property(arbLogEntry, (entry) => { const roundTripped = JSON.parse(JSON.stringify(entry)) as LogEntry - expect(roundTripped.timestamp).toBe(entry.timestamp) - expect(roundTripped.level).toBe(entry.level) - expect(roundTripped.logger).toBe(entry.logger) - expect(roundTripped.payload).toStrictEqual(entry.payload) + expect(roundTripped.stream).toBe(entry.stream) + expect(roundTripped.source).toBe(entry.source) + expect(roundTripped.markdown).toStrictEqual(entry.markdown) }), { numRuns: 200 }, ) @@ -171,10 +163,8 @@ describe('PipelineResult nested structure integrity', () => { for (const log of parsed.logs) { const l = log as unknown as Record - expect(typeof l['timestamp']).toBe('string') - expect(typeof l['level']).toBe('string') - expect(typeof l['logger']).toBe('string') - expect('payload' in l).toBe(true) + expect(typeof l['stream']).toBe('string') + expect(typeof l['markdown']).toBe('string') } }), { numRuns: 200 }, diff --git a/gui/src/api/bridge.ts b/gui/src/api/bridge.ts index 40c4e83a..c7333e82 100644 --- a/gui/src/api/bridge.ts +++ b/gui/src/api/bridge.ts @@ -1,10 +1,9 @@ import { invoke } from '@tauri-apps/api/core' export interface LogEntry { - readonly timestamp: string - readonly level: string - readonly logger: string - readonly payload: unknown + readonly stream: 'stdout' | 'stderr' + readonly source?: string + readonly markdown: string } export interface PluginExecutionResult { diff --git a/gui/src/components/MarkdownLogBlock.tsx b/gui/src/components/MarkdownLogBlock.tsx new file mode 100644 index 00000000..0576d062 --- /dev/null +++ b/gui/src/components/MarkdownLogBlock.tsx @@ -0,0 +1,85 @@ +import type {FC, ReactNode} from 'react' + +import {cn} from '@/lib/utils' + +interface MarkdownLogBlockProps { + readonly markdown: string + readonly className?: string +} + +function renderInlineMarkdown(text: string): ReactNode[] { + return text.split(/(`[^`]+`)/g).flatMap((segment, 
segmentIndex) => { + if (/^`[^`]+`$/u.test(segment)) { + return ( + + {segment.slice(1, -1)} + + ) + } + + return segment.split(/(\*\*[^*]+\*\*)/g).map((part, partIndex) => { + if (/^\*\*[^*]+\*\*$/u.test(part)) { + return ( + + {part.slice(2, -2)} + + ) + } + + return {part} + }) + }) +} + +function isMarkdownListBlock(lines: readonly string[]): boolean { + return lines.every((line) => line.trim().length === 0 || /^\s*(?:- |\d+\. )/u.test(line)) +} + +export const MarkdownLogBlock: FC = ({markdown, className}) => { + const blocks = markdown.trim().split(/\n{2,}/u).filter((block) => block.trim().length > 0) + + return ( +
+ {blocks.map((block, index) => { + const lines = block.split('\n') + const firstLine = lines[0]?.trim() ?? '' + + if (firstLine.startsWith('### ')) { + return ( +

+ {renderInlineMarkdown(firstLine.slice(4))} +

+ ) + } + + if (lines.length === 1 && /^\*\*[^*]+\*\*$/u.test(firstLine)) { + return ( +

+ {renderInlineMarkdown(firstLine)} +

+ ) + } + + if (isMarkdownListBlock(lines)) { + return ( +
+              {block}
+            
+ ) + } + + return ( +

+ {renderInlineMarkdown(block)} +

+ ) + })} +
+ ) +} diff --git a/gui/src/i18n/en-US.json b/gui/src/i18n/en-US.json index e5d3c473..f1d561ff 100644 --- a/gui/src/i18n/en-US.json +++ b/gui/src/i18n/en-US.json @@ -39,10 +39,8 @@ "plugins.type.output": "Output Plugin", "logs.title": "Logs", "logs.filter.all": "All", - "logs.filter.error": "Error", - "logs.filter.warn": "Warning", - "logs.filter.info": "Info", - "logs.filter.debug": "Debug", + "logs.filter.stdout": "Stdout", + "logs.filter.stderr": "Stderr", "files.title": "Files", "files.tab.app": "App", "files.tab.ext": "Ext", diff --git a/gui/src/i18n/zh-CN.json b/gui/src/i18n/zh-CN.json index 9e6bd708..9f10b79e 100644 --- a/gui/src/i18n/zh-CN.json +++ b/gui/src/i18n/zh-CN.json @@ -39,10 +39,8 @@ "plugins.type.output": "输出插件", "logs.title": "日志查看", "logs.filter.all": "全部", - "logs.filter.error": "错误", - "logs.filter.warn": "警告", - "logs.filter.info": "信息", - "logs.filter.debug": "调试", + "logs.filter.stdout": "标准输出", + "logs.filter.stderr": "标准错误", "files.title": "文件查看", "files.tab.app": "应用", "files.tab.ext": "扩展", diff --git a/gui/src/pages/LogsPage.tsx b/gui/src/pages/LogsPage.tsx index 0ce7cdb9..97353b68 100644 --- a/gui/src/pages/LogsPage.tsx +++ b/gui/src/pages/LogsPage.tsx @@ -1,28 +1,25 @@ import type { FC } from 'react' import { useMemo, useState } from 'react' +import { MarkdownLogBlock } from '@/components/MarkdownLogBlock' import { useI18n } from '@/i18n' import { cn } from '@/lib/utils' -import type { LogEntry, LogLevel } from '@/utils/logFilter' -import { filterLogsByLevel } from '@/utils/logFilter' +import type { LogEntry, LogStream } from '@/utils/logFilter' +import { filterLogsByStream } from '@/utils/logFilter' -const levelBadgeStyles: Record = { - error: 'bg-red-100 text-red-800 dark:bg-red-900/30 dark:text-red-400', - warn: 'bg-yellow-100 text-yellow-800 dark:bg-yellow-900/30 dark:text-yellow-400', - info: 'bg-blue-100 text-blue-800 dark:bg-blue-900/30 dark:text-blue-400', - debug: 'bg-gray-100 text-gray-800 dark:bg-gray-700/30 
dark:text-gray-400', +const streamBadgeStyles: Record = { + stdout: 'bg-blue-100 text-blue-800 dark:bg-blue-900/30 dark:text-blue-400', + stderr: 'bg-amber-100 text-amber-800 dark:bg-amber-900/30 dark:text-amber-400', } -type FilterOption = LogLevel | 'all' +type FilterOption = LogStream | 'all' -const filterOptions: readonly FilterOption[] = ['all', 'error', 'warn', 'info', 'debug'] +const filterOptions: readonly FilterOption[] = ['all', 'stdout', 'stderr'] const filterLabelKeys: Record = { all: 'logs.filter.all', - error: 'logs.filter.error', - warn: 'logs.filter.warn', - info: 'logs.filter.info', - debug: 'logs.filter.debug', + stdout: 'logs.filter.stdout', + stderr: 'logs.filter.stderr', } const LogsPage: FC = () => { @@ -34,7 +31,7 @@ const LogsPage: FC = () => { const filteredLogs = useMemo(() => { if (filter === 'all') return logs - return filterLogsByLevel(logs, filter) + return filterLogsByStream(logs, filter) }, [logs, filter]) return ( @@ -43,7 +40,7 @@ const LogsPage: FC = () => {

{t('logs.title')}

- {/* Level Filter */} + {/* Stream Filter */}
{filterOptions.map((opt) => (
{/* Log Entries */} -
+
{filteredLogs.length === 0 ? (

{logs.length === 0 ? 'No log entries yet.' : 'No entries match the selected filter.'} @@ -72,19 +69,22 @@ const LogsPage: FC = () => { filteredLogs.map((entry, i) => (

- {entry.timestamp} - - {entry.level.toUpperCase()} - - [{entry.namespace}] - {entry.message} +
+ + {entry.stream} + + {entry.source != null && entry.source.length > 0 ? ( + {entry.source} + ) : null} +
+
)) )} diff --git a/gui/src/utils/logFilter.property.test.ts b/gui/src/utils/logFilter.property.test.ts index 46ed8bd8..7aebb794 100644 --- a/gui/src/utils/logFilter.property.test.ts +++ b/gui/src/utils/logFilter.property.test.ts @@ -7,31 +7,18 @@ import fc from 'fast-check' import { describe, expect, it } from 'vitest' -import type { LogEntry, LogLevel } from '@/utils/logFilter' -import { filterLogsByLevel, isLevelAtLeast } from '@/utils/logFilter' +import type { LogEntry, LogStream } from '@/utils/logFilter' +import { filterLogsByStream, isMatchingStream } from '@/utils/logFilter' -const LOG_LEVELS: readonly LogLevel[] = ['error', 'warn', 'info', 'debug'] as const +const LOG_STREAMS: readonly LogStream[] = ['stdout', 'stderr'] as const -const LOG_LEVEL_SEVERITY: Record = { - error: 0, - warn: 1, - info: 2, - debug: 3, -} - -/** Arbitrary for a valid LogLevel */ -const arbLogLevel: fc.Arbitrary = fc.constantFrom(...LOG_LEVELS) - -/** Arbitrary for a LogEntry with a random level. Use integer timestamps to avoid Invalid Date issues. */ -const arbTimestamp: fc.Arbitrary = fc - .integer({ min: 946684800000, max: 4102444799000 }) // 2000-01-01 to 2099-12-31 - .map((ms) => new Date(ms).toISOString()) +/** Arbitrary for a valid LogStream */ +const arbLogStream: fc.Arbitrary = fc.constantFrom(...LOG_STREAMS) const arbLogEntry: fc.Arbitrary = fc.record({ - timestamp: arbTimestamp, - level: arbLogLevel, - namespace: fc.string({ minLength: 1, maxLength: 20 }), - message: fc.string({ minLength: 0, maxLength: 100 }), + stream: arbLogStream, + source: fc.option(fc.string({ minLength: 1, maxLength: 20 }), { nil: undefined }), + markdown: fc.string({ minLength: 0, maxLength: 200 }), }) /** Arbitrary for a non-empty list of LogEntry */ @@ -44,14 +31,13 @@ describe('Property 6: 日志级别过滤', () => { * For any list of log entries and any filter level, * every entry in the filtered result has severity >= the filter level. 
*/ - it('filtered result only contains entries with severity >= filter level', () => { + it('filtered result only contains entries from the selected stream', () => { fc.assert( - fc.property(arbLogEntries, arbLogLevel, (entries, minLevel) => { - const filtered = filterLogsByLevel(entries, minLevel) + fc.property(arbLogEntries, arbLogStream, (entries, stream) => { + const filtered = filterLogsByStream(entries, stream) for (const entry of filtered) { - expect(isLevelAtLeast(entry.level, minLevel)).toBe(true) - expect(LOG_LEVEL_SEVERITY[entry.level]).toBeLessThanOrEqual(LOG_LEVEL_SEVERITY[minLevel]) + expect(isMatchingStream(entry.stream, stream)).toBe(true) } }), { numRuns: 200 }, @@ -67,8 +53,8 @@ describe('Property 6: 日志级别过滤', () => { */ it('relative order of entries is preserved after filtering', () => { fc.assert( - fc.property(arbLogEntries, arbLogLevel, (entries, minLevel) => { - const filtered = filterLogsByLevel(entries, minLevel) + fc.property(arbLogEntries, arbLogStream, (entries, stream) => { + const filtered = filterLogsByStream(entries, stream) // Verify filtered is a subsequence of entries let j = 0 @@ -92,9 +78,9 @@ describe('Property 6: 日志级别过滤', () => { */ it('no qualifying entries are dropped', () => { fc.assert( - fc.property(arbLogEntries, arbLogLevel, (entries, minLevel) => { - const filtered = filterLogsByLevel(entries, minLevel) - const expectedCount = entries.filter((e) => isLevelAtLeast(e.level, minLevel)).length + fc.property(arbLogEntries, arbLogStream, (entries, stream) => { + const filtered = filterLogsByStream(entries, stream) + const expectedCount = entries.filter((entry) => isMatchingStream(entry.stream, stream)).length expect(filtered.length).toBe(expectedCount) }), { numRuns: 200 }, diff --git a/gui/src/utils/logFilter.test.ts b/gui/src/utils/logFilter.test.ts index a71b9308..bd081695 100644 --- a/gui/src/utils/logFilter.test.ts +++ b/gui/src/utils/logFilter.test.ts @@ -1,88 +1,63 @@ import { describe, expect, it } from 'vitest' 
import type { LogEntry } from '@/utils/logFilter' -import { filterLogsByLevel, isLevelAtLeast } from '@/utils/logFilter' +import { filterLogsByStream, isMatchingStream } from '@/utils/logFilter' -const makeEntry = (level: LogEntry['level'], message: string): LogEntry => ({ - timestamp: '2025-01-01T00:00:00Z', - level, - namespace: 'test', - message, +const makeEntry = (stream: LogEntry['stream'], markdown: string): LogEntry => ({ + stream, + source: 'test', + markdown, }) -describe('isLevelAtLeast', () => { - it('error meets all thresholds', () => { - expect(isLevelAtLeast('error', 'error')).toBe(true) - expect(isLevelAtLeast('error', 'warn')).toBe(true) - expect(isLevelAtLeast('error', 'info')).toBe(true) - expect(isLevelAtLeast('error', 'debug')).toBe(true) +describe('isMatchingStream', () => { + it('matches stdout to stdout', () => { + expect(isMatchingStream('stdout', 'stdout')).toBe(true) }) - it('debug only meets debug threshold', () => { - expect(isLevelAtLeast('debug', 'debug')).toBe(true) - expect(isLevelAtLeast('debug', 'info')).toBe(false) - expect(isLevelAtLeast('debug', 'warn')).toBe(false) - expect(isLevelAtLeast('debug', 'error')).toBe(false) - }) - - it('warn meets warn and below', () => { - expect(isLevelAtLeast('warn', 'error')).toBe(false) - expect(isLevelAtLeast('warn', 'warn')).toBe(true) - expect(isLevelAtLeast('warn', 'info')).toBe(true) - expect(isLevelAtLeast('warn', 'debug')).toBe(true) + it('does not match stdout to stderr', () => { + expect(isMatchingStream('stdout', 'stderr')).toBe(false) }) }) -describe('filterLogsByLevel', () => { +describe('filterLogsByStream', () => { const entries: readonly LogEntry[] = [ - makeEntry('debug', 'debug msg'), - makeEntry('info', 'info msg'), - makeEntry('warn', 'warn msg'), - makeEntry('error', 'error msg'), + makeEntry('stdout', 'sync complete'), + makeEntry('stderr', 'warning'), + makeEntry('stdout', 'cleanup complete'), + makeEntry('stderr', 'error'), ] - it('filters to error only', () => { - const 
result = filterLogsByLevel(entries, 'error') - expect(result).toHaveLength(1) - expect(result[0].level).toBe('error') - }) - - it('filters to warn and above', () => { - const result = filterLogsByLevel(entries, 'warn') + it('filters stdout entries', () => { + const result = filterLogsByStream(entries, 'stdout') expect(result).toHaveLength(2) - expect(result.map((e) => e.level)).toEqual(['warn', 'error']) + expect(result.every((entry) => entry.stream === 'stdout')).toBe(true) }) - it('filters to info and above', () => { - const result = filterLogsByLevel(entries, 'info') - expect(result).toHaveLength(3) - expect(result.map((e) => e.level)).toEqual(['info', 'warn', 'error']) - }) - - it('debug shows all entries', () => { - const result = filterLogsByLevel(entries, 'debug') - expect(result).toHaveLength(4) + it('filters stderr entries', () => { + const result = filterLogsByStream(entries, 'stderr') + expect(result).toHaveLength(2) + expect(result.every((entry) => entry.stream === 'stderr')).toBe(true) }) it('preserves original order', () => { const mixed: readonly LogEntry[] = [ - makeEntry('error', 'e1'), - makeEntry('debug', 'd1'), - makeEntry('error', 'e2'), - makeEntry('info', 'i1'), + makeEntry('stderr', 'e1'), + makeEntry('stdout', 'o1'), + makeEntry('stderr', 'e2'), + makeEntry('stdout', 'o2'), ] - const result = filterLogsByLevel(mixed, 'error') - expect(result.map((e) => e.message)).toEqual(['e1', 'e2']) + const result = filterLogsByStream(mixed, 'stderr') + expect(result.map((e) => e.markdown)).toEqual(['e1', 'e2']) }) it('returns empty array for empty input', () => { - expect(filterLogsByLevel([], 'debug')).toEqual([]) + expect(filterLogsByStream([], 'stdout')).toEqual([]) }) it('does not mutate the original array', () => { - const original: LogEntry[] = [makeEntry('debug', 'a'), makeEntry('error', 'b')] + const original: LogEntry[] = [makeEntry('stdout', 'a'), makeEntry('stderr', 'b')] const copy = [...original] - filterLogsByLevel(original, 'error') + 
filterLogsByStream(original, 'stderr') expect(original).toEqual(copy) }) }) diff --git a/gui/src/utils/logFilter.ts b/gui/src/utils/logFilter.ts index c7eabfd9..e704e8fb 100644 --- a/gui/src/utils/logFilter.ts +++ b/gui/src/utils/logFilter.ts @@ -1,38 +1,22 @@ -export type LogLevel = 'error' | 'warn' | 'info' | 'debug' +export type LogStream = 'stdout' | 'stderr' export interface LogEntry { - readonly timestamp: string - readonly level: LogLevel - readonly namespace: string - readonly message: string - readonly meta?: Record + readonly stream: LogStream + readonly source?: string + readonly markdown: string } -/** - * Severity ranking: error (0) > warn (1) > info (2) > debug (3). - * Lower number = higher severity. - */ -const LOG_LEVEL_SEVERITY: Record = { - error: 0, - warn: 1, - info: 2, - debug: 3, -} - -/** - * Check if a given level meets the minimum severity threshold. - */ -export function isLevelAtLeast(level: LogLevel, minLevel: LogLevel): boolean { - return LOG_LEVEL_SEVERITY[level] <= LOG_LEVEL_SEVERITY[minLevel] +export function isMatchingStream(stream: LogStream, filter: LogStream): boolean { + return stream === filter } /** - * Filter log entries by minimum level. - * Returns only entries whose severity is >= the specified minimum level. + * Filter log entries by output stream. + * Returns only entries whose stream matches the selected filter. * Preserves the original order of entries. * * Pure function — no side effects. 
*/ -export function filterLogsByLevel(entries: readonly LogEntry[], minLevel: LogLevel): readonly LogEntry[] { - return entries.filter((entry) => isLevelAtLeast(entry.level, minLevel)) +export function filterLogsByStream(entries: readonly LogEntry[], filter: LogStream): readonly LogEntry[] { + return entries.filter((entry) => isMatchingStream(entry.stream, filter)) } diff --git a/libraries/logger/src/index.test.ts b/libraries/logger/src/index.test.ts index a4fcc338..da41346b 100644 --- a/libraries/logger/src/index.test.ts +++ b/libraries/logger/src/index.test.ts @@ -18,7 +18,7 @@ const nativeModule = vi.hoisted(() => ({ rootCause: ['A warning was buffered.'], level: 'warn', namespace: 'logger-test', - copyText: ['[BUFFERED_WARN] Buffered warning'] + copyText: ['Buffered warning'] } ])) })) @@ -42,7 +42,7 @@ describe('logger bindings', () => { rootCause: ['A warning was buffered.'], level: 'warn', namespace: 'logger-test', - copyText: ['[BUFFERED_WARN] Buffered warning'] + copyText: ['Buffered warning'] } ])) }) @@ -91,18 +91,17 @@ describe('logger bindings', () => { ) const payload = JSON.parse(String(nativeLogger.log.mock.calls[0]?.[2])) as Record expect(payload['count']).toBe(1) - expect(payload['loggerTiming']).toEqual(expect.any(String)) + expect(payload['loggerTiming']).toBeUndefined() expect(nativeLogger.logDiagnostic).not.toHaveBeenCalled() }) - it('adds logger timing even when no metadata is provided', async () => { + it('keeps metadata undefined when no extra fields are provided', async () => { const {createLogger} = await import('./index') const logger = createLogger('logger-test') logger.info('hello') - const payload = JSON.parse(String(nativeLogger.log.mock.calls[0]?.[2])) as Record - expect(payload['loggerTiming']).toEqual(expect.any(String)) + expect(nativeLogger.log.mock.calls[0]?.[2]).toBeUndefined() }) it('skips serializing filtered plain logs on the JS side', async () => { diff --git a/libraries/logger/src/index.ts b/libraries/logger/src/index.ts 
index 946f18e5..9479d34c 100644 --- a/libraries/logger/src/index.ts +++ b/libraries/logger/src/index.ts @@ -74,11 +74,6 @@ const LOG_LEVEL_PRIORITY: Readonly> = { let napiBinding: NapiLoggerModule | undefined, napiBindingError: Error | undefined -const LOGGER_TIMING_STATE = { - processStartNs: process.hrtime.bigint(), - lastLogNs: void 0 as bigint | undefined -} - function isNapiLoggerModule(value: unknown): value is NapiLoggerModule { if (value == null || typeof value !== 'object') return false @@ -272,55 +267,12 @@ function normalizeLogArguments(message: string | object, meta: unknown[]): {mess } } -function formatElapsedMilliseconds(milliseconds: number): string { - if (!Number.isFinite(milliseconds) || milliseconds <= 0) return '0ms' - if (milliseconds >= 1000) return `${(milliseconds / 1000).toFixed(2)}s` - if (milliseconds >= 100) return `${Math.round(milliseconds)}ms` - return `${milliseconds.toFixed(1)}ms` -} - -function createLoggerTimingLabel(): string { - const nowNs = process.hrtime.bigint() - const sinceStartMs = Number(nowNs - LOGGER_TIMING_STATE.processStartNs) / 1_000_000 - const sincePreviousMs = LOGGER_TIMING_STATE.lastLogNs == null - ? 
sinceStartMs - : Number(nowNs - LOGGER_TIMING_STATE.lastLogNs) / 1_000_000 - - LOGGER_TIMING_STATE.lastLogNs = nowNs - return `+${formatElapsedMilliseconds(sincePreviousMs)} since previous log, ${formatElapsedMilliseconds(sinceStartMs)} since process start` -} - -function injectLoggerTiming(metaJson: string | undefined): string { - const loggerTiming = createLoggerTimingLabel() - if (metaJson == null) return serializePayload({loggerTiming}) - - try { - const parsed = JSON.parse(metaJson) as unknown - if (parsed != null && typeof parsed === 'object' && !Array.isArray(parsed)) { - return serializePayload({ - ...(parsed as Record), - loggerTiming - }) - } - - return serializePayload({ - loggerTiming, - meta: parsed - }) - } catch { - return serializePayload({ - loggerTiming, - meta: metaJson - }) - } -} - function createLogMethod(instance: NapiLoggerInstance, loggerLevel: LogLevel, level: PlainLogLevel): LoggerMethod { return (message: string | object, ...meta: unknown[]): void => { if (!shouldEmitLog(level, loggerLevel)) return const {message: normalizedMessage, metaJson} = normalizeLogArguments(message, meta) - instance.log(level, normalizedMessage, injectLoggerTiming(metaJson)) + instance.log(level, normalizedMessage, metaJson) } } diff --git a/libraries/logger/src/lib.rs b/libraries/logger/src/lib.rs index 341704e0..09a92cb6 100644 --- a/libraries/logger/src/lib.rs +++ b/libraries/logger/src/lib.rs @@ -3,16 +3,14 @@ //! AI-friendly Markdown logger with minimal terminal noise. //! //! Output format: -//! - Messages: `**LEVEL** `namespace` message` with optional bullet metadata -//! - Diagnostics: `**LEVEL** `namespace` [CODE] Title` with Markdown sections +//! - Messages: `### Title` with optional Markdown bullet metadata +//! 
- Diagnostics: `### Title` followed by concise action-focused sections use std::io::{BufWriter, Write}; use std::sync::atomic::{AtomicU8, Ordering}; use std::sync::mpsc::{self, Receiver, Sender}; use std::sync::{LazyLock, Mutex}; use std::thread; -use std::time::{Duration, Instant}; - use serde::{Deserialize, Serialize}; use serde_json::{Map, Value}; @@ -57,18 +55,6 @@ impl LogLevel { } } - fn display_label(self) -> &'static str { - match self { - Self::Silent => "SILENT", - Self::Fatal => "FATAL", - Self::Error => "ERROR", - Self::Warn => "WARN", - Self::Info => "INFO", - Self::Debug => "DEBUG", - Self::Trace => "TRACE", - } - } - pub fn from_str_loose(s: &str) -> Option { match s.to_ascii_lowercase().as_str() { "silent" => Some(Self::Silent), @@ -134,8 +120,6 @@ static GLOBAL_LOG_LEVEL: AtomicU8 = AtomicU8::new(255); // 255 = unset static BUFFERED_DIAGNOSTICS: LazyLock>> = LazyLock::new(|| Mutex::new(Vec::new())); static OUTPUT_SINK: LazyLock> = LazyLock::new(spawn_output_sink); -static LOGGER_PROCESS_START: LazyLock = LazyLock::new(Instant::now); -static LOGGER_LAST_LOG_AT: LazyLock>> = LazyLock::new(|| Mutex::new(None)); enum OutputCommand { Write { use_stderr: bool, output: String }, @@ -297,95 +281,6 @@ fn build_payload(message: &Value, meta: Option<&Value>) -> Value { Value::Object(map) } -fn format_elapsed_duration(duration: Duration) -> String { - let milliseconds = duration.as_secs_f64() * 1000.0; - if milliseconds <= 0.0 { - "0ms".to_string() - } else if milliseconds >= 1000.0 { - format!("{:.2}s", milliseconds / 1000.0) - } else if milliseconds >= 100.0 { - format!("{}ms", milliseconds.round() as i64) - } else { - format!("{milliseconds:.1}ms") - } -} - -fn create_logger_timing_label() -> String { - let now = Instant::now(); - let since_start = now.duration_since(*LOGGER_PROCESS_START); - let since_previous = match LOGGER_LAST_LOG_AT.lock() { - Ok(mut previous_log_at) => { - let previous = previous_log_at.unwrap_or(*LOGGER_PROCESS_START); - 
*previous_log_at = Some(now); - now.duration_since(previous) - } - Err(_) => since_start, - }; - - format!( - "+{} since previous log, {} since process start", - format_elapsed_duration(since_previous), - format_elapsed_duration(since_start) - ) -} - -fn payload_has_logger_timing(payload: &Value) -> bool { - match payload { - Value::Object(map) => { - if map.contains_key("loggerTiming") { - return true; - } - - if map.len() == 1 - && let Some(Value::Object(nested)) = map.values().next() - { - return nested.contains_key("loggerTiming"); - } - - false - } - _ => false, - } -} - -fn attach_logger_timing(payload: &Value) -> Value { - if payload_has_logger_timing(payload) { - return payload.clone(); - } - - let logger_timing = Value::String(create_logger_timing_label()); - - match payload { - Value::String(message) => { - let mut map = Map::new(); - map.insert("message".to_string(), Value::String(message.clone())); - map.insert("loggerTiming".to_string(), logger_timing.clone()); - Value::Object(map) - } - Value::Object(map) => { - if map.len() == 1 - && let Some((message, Value::Object(nested))) = map.iter().next() - { - let mut nested_map = nested.clone(); - nested_map.insert("loggerTiming".to_string(), logger_timing.clone()); - let mut map_with_timing = Map::new(); - map_with_timing.insert(message.clone(), Value::Object(nested_map)); - return Value::Object(map_with_timing); - } - - let mut map_with_timing = map.clone(); - map_with_timing.insert("loggerTiming".to_string(), logger_timing.clone()); - Value::Object(map_with_timing) - } - _ => { - let mut map = Map::new(); - map.insert("loggerTiming".to_string(), logger_timing); - map.insert("value".to_string(), payload.clone()); - Value::Object(map) - } - } -} - fn append_section( lines: &mut Vec, title: &str, @@ -559,30 +454,35 @@ fn split_preserved_lines(text: &str) -> Vec { .collect() } -fn render_message_header(level: LogLevel, namespace: &str, message: Option<&str>) -> String { - match message { - Some(message) if 
!message.is_empty() => { - format!("**{}** `{namespace}` {message}", level.display_label()) - } - _ => format!("**{}** `{namespace}`", level.display_label()), - } +fn render_markdown_heading(title: &str) -> String { + format!("### {title}") +} + +fn split_message_title(message: &str) -> (String, Vec) { + let mut lines = split_preserved_lines(message).into_iter(); + let title = lines + .find(|line| !line.trim().is_empty()) + .unwrap_or_else(|| "Details".to_string()); + let body = lines.collect(); + (title, body) } -fn render_message_output(level: LogLevel, namespace: &str, payload: &Value) -> String { +fn render_message_output(_level: LogLevel, _namespace: &str, payload: &Value) -> String { let (message, meta_lines) = extract_message_and_meta_lines(payload); let mut lines = Vec::new(); match message { Some(message) if message.contains('\n') => { - lines.push(render_message_header(level, namespace, None)); - lines.push(String::new()); - lines.extend(split_preserved_lines(&message)); - } - Some(message) => { - lines.push(render_message_header(level, namespace, Some(&message))); + let (title, body_lines) = split_message_title(&message); + lines.push(render_markdown_heading(&title)); + if !body_lines.is_empty() { + lines.push(String::new()); + lines.extend(body_lines); + } } + Some(message) => lines.push(render_markdown_heading(&message)), None => { - lines.push(render_message_header(level, namespace, None)); + lines.push(render_markdown_heading("Details")); } } @@ -594,29 +494,59 @@ fn render_message_output(level: LogLevel, namespace: &str, payload: &Value) -> S lines.join("\n") } -fn render_diagnostic_output(level: LogLevel, record: &LoggerDiagnosticRecord) -> String { - let mut lines = vec![format!( - "**{}** `{}` {}", - level.display_label(), - record.namespace, - record.copy_text[0] - )]; +fn render_diagnostic_output(_level: LogLevel, record: &LoggerDiagnosticRecord) -> String { + let mut lines = vec![render_markdown_heading(&record.title)]; - if 
record.copy_text.len() > 1 { - lines.push(String::new()); - lines.extend(record.copy_text.iter().skip(1).cloned()); + if !record.root_cause.is_empty() { + append_section(&mut lines, "**What happened**", &record.root_cause, None); + } + + if let Some(exact_fix) = &record.exact_fix { + append_section(&mut lines, "**Do this**", exact_fix, None); + } + + if let Some(possible_fixes) = &record.possible_fixes + && !possible_fixes.is_empty() + { + if !lines.is_empty() { + lines.push(String::new()); + } + lines.push("**Try this if needed**".to_string()); + for (index, fix) in possible_fixes.iter().enumerate() { + let mut iter = fix.iter(); + if let Some(first) = iter.next() { + lines.push(format!(" {}. {}", index + 1, first)); + } + for entry in iter { + lines.push(format!(" {entry}")); + } + } + } + + if let Some(details) = &record.details + && !details.is_empty() + { + if !lines.is_empty() { + lines.push(String::new()); + } + lines.push("**Context**".to_string()); + let mut detail_lines = value_to_markdown_lines(&Value::Object(details.clone())); + for line in &mut detail_lines { + line.insert_str(0, " "); + } + lines.extend(detail_lines); } lines.join("\n") } fn build_copy_text(record: &LoggerDiagnosticRecord) -> Vec { - let mut lines = vec![format!("[{}] {}", record.code, record.title)]; + let mut lines = vec![record.title.clone()]; - append_section(&mut lines, "**Root Cause**", &record.root_cause, None); + append_section(&mut lines, "**What happened**", &record.root_cause, None); if let Some(exact_fix) = &record.exact_fix { - append_section(&mut lines, "**Exact Fix**", exact_fix, None); + append_section(&mut lines, "**Do this**", exact_fix, None); } if let Some(possible_fixes) = &record.possible_fixes @@ -625,7 +555,7 @@ fn build_copy_text(record: &LoggerDiagnosticRecord) -> Vec { if !lines.is_empty() { lines.push(String::new()); } - lines.push("**Possible Fixes**".to_string()); + lines.push("**Try this if needed**".to_string()); for (index, fix) in 
possible_fixes.iter().enumerate() { let mut iter = fix.iter(); if let Some(first) = iter.next() { @@ -759,10 +689,7 @@ fn push_buffered_diagnostic(record: &LoggerDiagnosticRecord) { } fn writes_to_stderr(level: LogLevel) -> bool { - matches!( - level, - LogLevel::Error | LogLevel::Fatal | LogLevel::Warn | LogLevel::Debug | LogLevel::Trace - ) + matches!(level, LogLevel::Error | LogLevel::Fatal | LogLevel::Warn) } // --------------------------------------------------------------------------- @@ -837,7 +764,6 @@ fn print_output(level: LogLevel, output: &str) { } fn emit_message_log_record(level: LogLevel, namespace: &str, payload: Value) -> LogRecord { - let payload = attach_logger_timing(&payload); let record = LogRecord { meta: ( String::new(), @@ -1201,10 +1127,10 @@ mod tests { }, ); - assert_eq!(record.copy_text[0], "[TEST_ERROR] Example diagnostic"); - assert!(record.copy_text.contains(&"**Root Cause**".to_string())); - assert!(record.copy_text.contains(&"**Exact Fix**".to_string())); - assert!(record.copy_text.contains(&"**Possible Fixes**".to_string())); + assert_eq!(record.copy_text[0], "Example diagnostic"); + assert!(record.copy_text.contains(&"**What happened**".to_string())); + assert!(record.copy_text.contains(&"**Do this**".to_string())); + assert!(record.copy_text.contains(&"**Try this if needed**".to_string())); assert!(record.copy_text.contains(&"**Context**".to_string())); } @@ -1216,7 +1142,7 @@ mod tests { )])); let rendered = render_message_output(LogLevel::Info, "logger-test", &payload); - assert_eq!(rendered, "**INFO** `logger-test` hello"); + assert_eq!(rendered, "### hello"); } #[test] @@ -1224,7 +1150,7 @@ mod tests { let payload = Value::String("line one\nline two".to_string()); let rendered = render_message_output(LogLevel::Info, "logger-test", &payload); - assert_eq!(rendered, "**INFO** `logger-test`\n\nline one\nline two"); + assert_eq!(rendered, "### line one\n\nline two"); } #[test] @@ -1236,7 +1162,7 @@ mod tests { }); let rendered = 
render_message_output(LogLevel::Info, "PluginPipeline", &payload); - assert!(rendered.contains("**INFO** `PluginPipeline` started")); + assert!(rendered.contains("### started")); assert!(rendered.contains("- command: execute")); } @@ -1262,8 +1188,8 @@ mod tests { ); let rendered = render_diagnostic_output(LogLevel::Warn, &record); - assert!(rendered.contains("**WARN** `logger-test` [TEST_WARN] Pretty output")); - assert!(rendered.contains("**Root Cause**")); + assert!(rendered.contains("### Pretty output")); + assert!(rendered.contains("**What happened**")); assert!(rendered.contains(" - The warning must stay readable.")); assert!(rendered.contains("**Context**")); assert!(rendered.contains(" - path: C:\\runtime\\plugin")); @@ -1344,9 +1270,9 @@ mod tests { fn test_write_output_line_flushes_each_message() { let mut writer = FlushTrackingWriter::default(); - write_output_line(&mut writer, "**INFO** `logger-test` hello").unwrap(); + write_output_line(&mut writer, "### hello").unwrap(); - assert_eq!(String::from_utf8(writer.writes).unwrap(), "**INFO** `logger-test` hello\n"); + assert_eq!(String::from_utf8(writer.writes).unwrap(), "### hello\n"); assert_eq!(writer.flush_count, 1); } } diff --git a/scripts/build-native.ts b/scripts/build-native.ts index 92e69eb4..8385dc3b 100644 --- a/scripts/build-native.ts +++ b/scripts/build-native.ts @@ -5,6 +5,7 @@ import {homedir} from 'node:os' import {dirname, join, resolve} from 'node:path' import process from 'node:process' import {fileURLToPath} from 'node:url' +import {writeError, writeMarkdownBlock, writeWarning} from './markdown-output' const NATIVE_MODULES = [ {name: 'logger', dir: 'libraries/logger'}, @@ -42,12 +43,14 @@ function findCargo(): string | null { const cargo = findCargo() if (cargo == null) { - console.warn('[build-native] cargo not found, skipping native build') - console.warn('[build-native] Install Rust: https://rustup.rs') + writeWarning('Skipping native build', { + reason: 'cargo is not available on 
PATH.', + install: 'https://rustup.rs', + }) process.exit(0) } -console.log(`[build-native] Using cargo: ${cargo}`) +writeMarkdownBlock('Using cargo toolchain', {cargo}) const cargoDir = dirname(cargo) const envWithCargo = { @@ -59,13 +62,13 @@ const envWithCargo = { let failed = false for (const mod of NATIVE_MODULES) { const moduleDir = join(root, mod.dir) - console.log(`[build-native] Building ${mod.name}...`) + writeMarkdownBlock('Building native module', {module: mod.name}) try { const packageJsonPath = join(moduleDir, 'package.json') if (existsSync(packageJsonPath)) { const packageJson = JSON.parse(readFileSync(packageJsonPath, 'utf8')) as PackageManifestWithScripts if (packageJson.scripts?.['build:ts'] != null) { - console.log(`[build-native] Building ${mod.name} TypeScript artifacts...`) + writeMarkdownBlock('Building TypeScript artifacts', {module: mod.name}) execSync('pnpm run build:ts', {stdio: 'inherit', cwd: moduleDir, env: envWithCargo}) } } @@ -75,20 +78,24 @@ for (const mod of NATIVE_MODULES) { {stdio: 'inherit', cwd: moduleDir, env: envWithCargo}, ) } catch { - console.error(`[build-native] ${mod.name}: build failed`) + writeError('Native build failed', {module: mod.name}) failed = true } } if (failed) { - console.warn('[build-native] Some native modules failed to build, skipping copy') - console.warn('[build-native] Ensure Rust toolchain + linker are available, then run: pnpm run build:native') + writeWarning('Skipping NAPI copy step', { + reason: 'One or more native modules failed to build.', + nextStep: 'Ensure the Rust toolchain and linker are available, then rerun `pnpm run build:native`.', + }) process.exit(0) } -console.log('[build-native] All libraries built, copying .node files...') +writeMarkdownBlock('Copying built NAPI artifacts') try { execSync('tsx scripts/copy-napi.ts', {stdio: 'inherit', cwd: root}) } catch { - console.warn('[build-native] copy-napi failed, .node files may not be in place') + writeWarning('NAPI copy step failed', { 
+ reason: 'The built .node files may not be in place.', + }) } diff --git a/scripts/build-quiet.ts b/scripts/build-quiet.ts index 6b493eb9..6a28b040 100644 --- a/scripts/build-quiet.ts +++ b/scripts/build-quiet.ts @@ -1,4 +1,5 @@ import { spawn } from 'node:child_process' +import {writeError, writeMarkdownBlock} from './markdown-output' async function runBuild(): Promise<void> { return new Promise((resolve, reject) => { @@ -23,19 +24,19 @@ child.on('close', (code) => { // 以进程退出码为准,stderr 可能有警告信息 if (code === 0) { - console.log('✓ Build successful') + writeMarkdownBlock('Build complete') resolve() } else { - console.error('✗ Build failed') + writeError('Build failed', {exitCode: code ?? 'unknown'}) if (errorOutput) { - console.error(errorOutput) + writeError('Build stderr', {output: errorOutput.trim()}) } reject(new Error(`Build exited with code ${code}`)) } }) child.on('error', (err) => { - console.error('✗ Build failed:', err.message) + writeError('Build process failed to start', {error: err.message}) reject(err) }) }) diff --git a/scripts/cargo-test.ts b/scripts/cargo-test.ts index aeb2c7a4..cdc417f5 100644 --- a/scripts/cargo-test.ts +++ b/scripts/cargo-test.ts @@ -3,6 +3,7 @@ import {execFileSync} from 'node:child_process' import {existsSync} from 'node:fs' import {homedir} from 'node:os' import {join} from 'node:path' +import {writeError} from './markdown-output' const candidates: string[] = [ process.env['CARGO'] ?? '', @@ -20,7 +21,9 @@ for (const c of candidates) { } if (cargoPath == null) { - console.error('[cargo-test] cargo not found. Install Rust: https://rustup.rs') + writeError('cargo is not available on PATH', { + install: 'https://rustup.rs', + }) process.exit(1) } @@ -33,7 +36,10 @@ catch (err) { ? ((err as NodeJS.ErrnoException & {status?: number}).status ?? 1) : 1 if (status === 101) { - console.error('[cargo-test] Rust build failed (likely missing linker/toolchain).
Install Visual Studio Build Tools: https://aka.ms/vs/17/release/vs_BuildTools.exe') + writeError('Rust build failed before tests could run', { + likelyCause: 'Missing linker or toolchain.', + install: 'https://aka.ms/vs/17/release/vs_BuildTools.exe', + }) process.exit(1) } process.exit(status) diff --git a/scripts/copy-napi.ts b/scripts/copy-napi.ts index 279786ba..3fdbb565 100644 --- a/scripts/copy-napi.ts +++ b/scripts/copy-napi.ts @@ -4,6 +4,7 @@ import {dirname, join, resolve} from 'node:path' import {fileURLToPath} from 'node:url' import process from 'node:process' +import {writeMarkdownBlock, writeWarning} from './markdown-output' import {resolveTargetDirs, writePlatformPackageShims} from './write-platform-package-shims' const NATIVE_MODULES = [ @@ -29,7 +30,9 @@ const npmPackagesDir = join(root, 'cli', 'npm') writePlatformPackageShims(resolveTargetDirs([])) if (suffix == null) { - console.warn(`[copy-napi] Unsupported platform: ${process.platform}-${process.arch}, wrote package shims only`) + writeWarning('Wrote platform package shims only', { + reason: `Unsupported platform: ${process.platform}-${process.arch}`, + }) process.exit(0) } @@ -41,28 +44,46 @@ let copied = 0 for (const mod of NATIVE_MODULES) { const modDist = join(root, mod.distDir) if (!existsSync(modDist)) { - console.warn(`[copy-napi] ${mod.name}: dist/ not found, skipping (run napi build first)`) + writeWarning('Skipping native module copy', { + module: mod.name, + reason: 'dist/ was not found.', + nextStep: 'Run the NAPI build first.', + }) continue } const nodeFiles = readdirSync(modDist).filter(f => f.endsWith('.node')) if (nodeFiles.length === 0) { - console.warn(`[copy-napi] ${mod.name}: no .node files in dist/, skipping (run napi build first)`) + writeWarning('Skipping native module copy', { + module: mod.name, + reason: 'No .node files were found in dist/.', + nextStep: 'Run the NAPI build first.', + }) continue } for (const file of nodeFiles) { const src = join(modDist, file) const 
dst = join(targetDir, file) cpSync(src, dst) - console.log(`[copy-napi] ${mod.name}: ${file} → cli/npm/${suffix}/`) + writeMarkdownBlock('Copied NAPI artifact', { + module: mod.name, + file, + target: `cli/npm/${suffix}/`, + }) copied++ } } if (copied > 0) { - console.log(`[copy-napi] Done: ${copied} file(s) copied to cli/npm/${suffix}/`) + writeMarkdownBlock('NAPI copy complete', { + files: copied, + target: `cli/npm/${suffix}/`, + }) } else { - console.warn('[copy-napi] No .node files found. Build napi first:') - console.warn(' pnpm -F @truenine/logger run build:native') - console.warn(' pnpm -F @truenine/md-compiler run build:native') - console.warn(' pnpm -F @truenine/memory-sync-sdk run build:native') + writeWarning('No NAPI artifacts were copied', { + nextSteps: [ + 'pnpm -F @truenine/logger run build:native', + 'pnpm -F @truenine/md-compiler run build:native', + 'pnpm -F @truenine/memory-sync-sdk run build:native', + ], + }) } diff --git a/scripts/install-rust-deps.ts b/scripts/install-rust-deps.ts index e4c0993b..09e7bf16 100644 --- a/scripts/install-rust-deps.ts +++ b/scripts/install-rust-deps.ts @@ -5,6 +5,7 @@ import {homedir} from 'node:os' import {dirname, join, resolve} from 'node:path' import process from 'node:process' import {fileURLToPath} from 'node:url' +import {writeMarkdownBlock, writeWarning} from './markdown-output' const __dirname = import.meta.dirname ?? dirname(fileURLToPath(import.meta.url)) const root = resolve(__dirname, '..') @@ -33,8 +34,10 @@ function findCargo(): string | null { const cargo = findCargo() if (cargo == null) { - console.warn('[install-rust-deps] cargo not found, skipping Rust dependency fetch') - console.warn('[install-rust-deps] Install Rust: https://rustup.rs') + writeWarning('Skipping Rust dependency prefetch', { + reason: 'cargo is not available on PATH.', + install: 'https://rustup.rs', + }) process.exit(0) } @@ -45,7 +48,7 @@ const envWithCargo = { PATH: `${cargoDir}${process.platform === 'win32' ? 
';' : ':'}${process.env['PATH'] ?? ''}`, } -console.log(`[install-rust-deps] Using cargo: ${cargo}`) +writeMarkdownBlock('Using cargo toolchain', {cargo}) try { execFileSync(cargo, ['fetch', '--locked'], { @@ -54,7 +57,8 @@ try { stdio: 'inherit', }) } catch { - console.warn('[install-rust-deps] cargo fetch failed, continuing without prefetch') - console.warn('[install-rust-deps] Ensure Rust toolchain + network access are available, then rerun: cargo fetch --locked') + writeWarning('Rust dependency prefetch failed', { + nextStep: 'Ensure the Rust toolchain and network access are available, then rerun `cargo fetch --locked`.', + }) process.exit(0) } diff --git a/scripts/markdown-output.ts b/scripts/markdown-output.ts new file mode 100644 index 00000000..3c638cbb --- /dev/null +++ b/scripts/markdown-output.ts @@ -0,0 +1,117 @@ +import process from 'node:process' + +export type MarkdownStream = 'stdout' | 'stderr' + +type MarkdownValue = + | string + | number + | boolean + | null + | undefined + | readonly MarkdownValue[] + | {[key: string]: MarkdownValue} + +function indent(depth: number): string { + return ' '.repeat(depth) +} + +function scalarToText(value: Exclude): string { + if (value == null) return 'null' + return String(value) +} + +function appendMarkdownValue(lines: string[], label: string | undefined, value: MarkdownValue, depth: number): void { + const prefix = `${indent(depth)}- ` + + if ( + value == null + || typeof value === 'string' + || typeof value === 'number' + || typeof value === 'boolean' + ) { + lines.push(label == null + ? `${prefix}${scalarToText(value)}` + : `${prefix}${label}: ${scalarToText(value)}`) + return + } + + if (Array.isArray(value)) { + if (value.length === 0) { + lines.push(label == null ? `${prefix}[]` : `${prefix}${label}: []`) + return + } + + if (label != null) lines.push(`${prefix}${label}:`) + for (const item of value) appendMarkdownValue(lines, void 0, item, label == null ? 
depth : depth + 1) + return + } + + const entries = Object.entries(value) + if (entries.length === 0) { + lines.push(label == null ? `${prefix}{}` : `${prefix}${label}: {}`) + return + } + + if (label != null) { + lines.push(`${prefix}${label}:`) + for (const [key, nested] of entries) appendMarkdownValue(lines, key, nested, depth + 1) + return + } + + for (const [key, nested] of entries) appendMarkdownValue(lines, key, nested, depth) +} + +function toMarkdownLines(details: Record<string, MarkdownValue>): string[] { + const lines: string[] = [] + for (const [key, value] of Object.entries(details)) { + if (value === undefined) continue + appendMarkdownValue(lines, key, value, 0) + } + return lines +} + +export function renderMarkdownBlock(title: string, details?: Record<string, MarkdownValue>): string { + const lines = [`### ${title}`] + const detailLines = details == null ? [] : toMarkdownLines(details) + if (detailLines.length > 0) { + lines.push('', ...detailLines) + } + return lines.join('\n') +} + +export function writeMarkdown(markdown: string, stream: MarkdownStream = 'stdout'): void { + const normalized = markdown.trimEnd() + process[stream].write(`${normalized}\n`) +} + +export function writeMarkdownBlock( + title: string, + details?: Record<string, MarkdownValue>, + stream: MarkdownStream = 'stdout' +): void { + writeMarkdown(renderMarkdownBlock(title, details), stream) +} + +export function writeWarning( + title: string, + details?: Record<string, MarkdownValue> +): void { + writeMarkdownBlock(title, details, 'stderr') +} + +export function writeError( + title: string, + details?: Record<string, MarkdownValue> +): void { + writeMarkdownBlock(title, details, 'stderr') +} + +const markdownOutput = { + renderMarkdownBlock, + writeMarkdown, + writeMarkdownBlock, + writeWarning, + writeError, +} + +export default markdownOutput diff --git a/scripts/postinstall.ts b/scripts/postinstall.ts index 773baf2b..fab99c81 100644 --- a/scripts/postinstall.ts +++ b/scripts/postinstall.ts @@ -1,6 +1,7 @@ #!/usr/bin/env tsx import {execSync} from 'node:child_process' import process from
'node:process' +import {writeError, writeMarkdownBlock} from './markdown-output' const CI_ENV_VARS = ['CI', 'GITHUB_ACTIONS', 'VERCEL', 'VERCEL_ENV'] as const @@ -10,7 +11,9 @@ function hasTruthyEnv(name: (typeof CI_ENV_VARS)[number]): boolean { } if (CI_ENV_VARS.some(hasTruthyEnv)) { - console.log('[postinstall] CI or Vercel detected, skipping git hooks and native bootstrap') + writeMarkdownBlock('Skipping local postinstall bootstrap', { + reason: 'CI or Vercel environment detected.', + }) process.exit(0) } @@ -26,9 +29,12 @@ for (const command of commands) { stdio: 'inherit', }) } catch (error) { - console.error(`[postinstall] Command failed: ${command}`) + writeError('Postinstall command failed', {command}) if (error instanceof Error && 'status' in error) { - console.error(`[postinstall] Exit code: ${(error as {status: number}).status}`) + writeError('Postinstall exit code', { + command, + exitCode: (error as {status: number}).status, + }) } process.exit(1) } diff --git a/sdk/scripts/finalize-bundle.ts b/sdk/scripts/finalize-bundle.ts index d53fa142..dd05d158 100644 --- a/sdk/scripts/finalize-bundle.ts +++ b/sdk/scripts/finalize-bundle.ts @@ -3,6 +3,9 @@ import {copyFileSync, existsSync, mkdtempSync, readdirSync, rmSync, writeFileSyn import {tmpdir} from 'node:os' import {dirname, join, resolve} from 'node:path' import {fileURLToPath, pathToFileURL} from 'node:url' +import markdownOutput from '../../scripts/markdown-output' + +const {writeMarkdownBlock} = markdownOutput const scriptDir = dirname(fileURLToPath(import.meta.url)) const cliDir = resolve(scriptDir, '..') @@ -140,4 +143,6 @@ const bundledJitiChunkPath = ensureBundledJitiRuntimeAssets() smokeTestBundledJitiTransform(bundledJitiChunkPath) smokeTestCliEntry() -console.log(`Finalized bundled CLI assets for ${indexEntryPath}`) +writeMarkdownBlock('Bundled CLI assets finalized', { + entry: indexEntryPath, +}) diff --git a/sdk/scripts/generate-schema.ts b/sdk/scripts/generate-schema.ts index 
b8c124dc..1dd3d193 100644 --- a/sdk/scripts/generate-schema.ts +++ b/sdk/scripts/generate-schema.ts @@ -1,5 +1,10 @@ import {writeFileSync} from 'node:fs' +import markdownOutput from '../../scripts/markdown-output' import {TNMSC_JSON_SCHEMA} from '../src/schema.ts' +const {writeMarkdownBlock} = markdownOutput + writeFileSync('./dist/tnmsc.schema.json', `${JSON.stringify(TNMSC_JSON_SCHEMA, null, 2)}\n`, 'utf8') -console.log('Schema generated successfully!') +writeMarkdownBlock('Schema generation complete', { + output: './dist/tnmsc.schema.json', +}) diff --git a/sdk/src/ConfigLoader.ts b/sdk/src/ConfigLoader.ts index 62e03b64..7354d7b4 100644 --- a/sdk/src/ConfigLoader.ts +++ b/sdk/src/ConfigLoader.ts @@ -58,7 +58,7 @@ export class ConfigLoader { if (!runtimeEnvironment.isWsl) return [getRequiredGlobalConfigPath()] - this.logger.info('wsl environment detected', { + this.logger.debug('WSL environment detected', { effectiveHomeDir: runtimeEnvironment.effectiveHomeDir }) if (runtimeEnvironment.selectedGlobalConfigPath == null) { @@ -66,7 +66,7 @@ export class ConfigLoader { `WSL host config file not found under "${runtimeEnvironment.windowsUsersRoot}/*/${DEFAULT_GLOBAL_CONFIG_DIR}/${DEFAULT_CONFIG_FILE_NAME}".` ) } - this.logger.info('using wsl host global config', { + this.logger.debug('Using WSL host global config', { path: runtimeEnvironment.selectedGlobalConfigPath }) return [getRequiredGlobalConfigPath()] diff --git a/sdk/src/config.ts b/sdk/src/config.ts index a4486a4e..2f949f21 100644 --- a/sdk/src/config.ts +++ b/sdk/src/config.ts @@ -285,11 +285,11 @@ async function resolvePluginSetup(options: PluginOptions | DefineConfigOptions = const logger = createLogger('defineConfig', logLevel) if (userConfigFound) { - logger.info('user config loaded', {sources: userConfigSources}) + logger.debug('User config loaded', {sources: userConfigSources}) } else { - logger.info('no user config found, using defaults/programmatic options', { - workspaceDir: 
mergedOptions.workspaceDir, - aindexDir: mergedOptions.aindex.dir, + logger.debug('Using defaults and programmatic config', { + workspace: mergedOptions.workspaceDir, + aindex: mergedOptions.aindex.dir, logLevel: mergedOptions.logLevel }) } diff --git a/sdk/src/core/config/mod.rs b/sdk/src/core/config/mod.rs index 808a76f9..8d27d0e5 100644 --- a/sdk/src/core/config/mod.rs +++ b/sdk/src/core/config/mod.rs @@ -846,7 +846,7 @@ impl ConfigLoader { let runtime_environment = resolve_runtime_environment(); if runtime_environment.is_wsl { - self.logger.info( + self.logger.debug( Value::String("wsl environment detected".into()), Some(serde_json::json!({ "effectiveHomeDir": runtime_environment @@ -859,7 +859,7 @@ impl ConfigLoader { let config_path = get_required_global_config_path()?; if runtime_environment.is_wsl { - self.logger.info( + self.logger.debug( Value::String("using wsl host global config".into()), Some(serde_json::json!({ "path": config_path.to_string_lossy() diff --git a/sdk/src/diagnostics.ts b/sdk/src/diagnostics.ts index d33efd86..79024e75 100644 --- a/sdk/src/diagnostics.ts +++ b/sdk/src/diagnostics.ts @@ -159,8 +159,8 @@ export function buildFileOperationDiagnostic(options: FileOperationDiagnosticOpt code, title, rootCause: diagnosticLines( - `tnmsc could not ${operation} the ${targetKind} at "${path}".`, - `Underlying error: ${errorMessage}` + `Could not ${operation} the ${targetKind} at "${path}".`, + `Error: ${errorMessage}` ), exactFix: exactFix ?? advice.exactFix, possibleFixes: possibleFixes ?? advice.possibleFixes, @@ -207,11 +207,11 @@ export function buildBatchFileOperationDiagnostic(options: BatchFileOperationDia code, title, rootCause: diagnosticLines( - `tnmsc encountered ${failures.length} failed ${operation} operation(s) while handling ${targetKind}.`, + `${failures.length} ${operation} operation(s) failed while handling ${targetKind}.`, firstFailureLine ), exactFix: exactFix ?? 
diagnosticLines( - `Inspect the failing ${targetKind} path and correct the underlying ${operation} problem before retrying tnmsc.` + `Fix the failing ${targetKind} path, then retry tnmsc.` ), possibleFixes: possibleFixes ?? [ diagnosticLines('Verify the target path exists, has the expected type, and is accessible to tnmsc.'), @@ -306,7 +306,7 @@ export function buildPathStateDiagnostic(options: PathStateDiagnosticOptions): L code, title, rootCause: diagnosticLines( - `tnmsc expected a ${expectedKind} at "${path}".`, + `Expected a ${expectedKind} at "${path}".`, `Actual state: ${actualState}` ), exactFix: exactFix ?? diagnosticLines( @@ -351,7 +351,7 @@ export function buildPromptCompilerDiagnostic(options: PromptCompilerDiagnosticO title, rootCause: summaryLines, exactFix: exactFix ?? diagnosticLines( - 'Fix the referenced prompt source or compiled dist file so the compiler diagnostic no longer triggers.' + 'Fix the referenced prompt source or compiled file, then rerun tnmsc.' ), possibleFixes: possibleFixes ?? [ diagnosticLines('Open the file referenced in the diagnostic and correct the reported syntax or metadata issue.'), @@ -372,9 +372,9 @@ export function buildProtectedDeletionDiagnostic( return buildDiagnostic({ code: 'PROTECTED_DELETION_GUARD_TRIGGERED', - title: 'Protected deletion guard blocked a destructive operation', + title: 'Protected path blocked cleanup', rootCause: diagnosticLines( - `The "${operation}" operation targeted ${violations.length} protected path(s).`, + `"${operation}" targeted ${violations.length} protected path(s).`, firstViolation != null ? `Example protected path: ${firstViolation.protectedPath}` : 'No violation details were captured.' 
@@ -405,13 +405,13 @@ export function buildUnhandledExceptionDiagnostic(context: string, error: unknow return buildDiagnostic({ code: 'UNHANDLED_EXCEPTION', - title: `Unhandled exception in ${context}`, + title: `Unexpected failure in ${context}`, rootCause: diagnosticLines( - `tnmsc terminated because an unhandled exception escaped the ${context} flow.`, - `Underlying error: ${errorMessage}` + `An unhandled exception escaped the ${context} flow.`, + `Error: ${errorMessage}` ), exactFix: diagnosticLines( - 'Inspect the error context and add the missing guard, validation, or recovery path before retrying the command.' + 'Inspect the failing code path, add the missing guard or validation, then retry the command.' ), possibleFixes: [ diagnosticLines('Re-run the command with the same inputs after fixing the referenced file or configuration.'), @@ -424,15 +424,22 @@ export function buildUnhandledExceptionDiagnostic(context: string, error: unknow }) } +export type PublicLoggerDiagnosticRecord = Omit + +function stripDiagnosticLevel(diagnostic: LoggerDiagnosticRecord): PublicLoggerDiagnosticRecord { + const {level: _level, ...publicDiagnostic} = diagnostic + return publicDiagnostic +} + export function partitionBufferedDiagnostics( diagnostics: readonly LoggerDiagnosticRecord[] -): {warnings: LoggerDiagnosticRecord[], errors: LoggerDiagnosticRecord[]} { - const warnings: LoggerDiagnosticRecord[] = [] - const errors: LoggerDiagnosticRecord[] = [] +): {warnings: PublicLoggerDiagnosticRecord[], errors: PublicLoggerDiagnosticRecord[]} { + const warnings: PublicLoggerDiagnosticRecord[] = [] + const errors: PublicLoggerDiagnosticRecord[] = [] for (const diagnostic of diagnostics) { - if (diagnostic.level === 'warn') warnings.push(diagnostic) - else errors.push(diagnostic) + if (diagnostic.level === 'warn') warnings.push(stripDiagnosticLevel(diagnostic)) + else errors.push(stripDiagnosticLevel(diagnostic)) } return {warnings, errors} diff --git a/sdk/src/runtime/cleanup.ts 
b/sdk/src/runtime/cleanup.ts index 4127eae1..c8289d66 100644 --- a/sdk/src/runtime/cleanup.ts +++ b/sdk/src/runtime/cleanup.ts @@ -611,27 +611,24 @@ export async function collectDeletionTargets( conflicts: CleanupProtectionConflict[] excludedScanGlobs: string[] }> { - cleanCtx.logger.info('cleanup planning started', { - phase: 'cleanup-plan', + cleanCtx.logger.debug('Cleanup planning started', { dryRun: cleanCtx.dryRun === true, - pluginCount: outputPlugins.length, - workspaceDir: cleanCtx.collectedOutputContext.workspace.directory.path + plugins: outputPlugins.length, + workspace: cleanCtx.collectedOutputContext.workspace.directory.path }) const snapshot = await buildCleanupSnapshot( outputPlugins, cleanCtx, predeclaredOutputs ) - cleanCtx.logger.info('cleanup snapshot prepared', { - phase: 'cleanup-plan', + cleanCtx.logger.debug('Cleanup snapshot prepared', { ...summarizeCleanupSnapshot(snapshot) }) const plan = reconcileExactSafeFileViolations( await planCleanupWithNative(snapshot), collectExactSafeFilePaths(snapshot) ) - cleanCtx.logger.info('cleanup planning complete', { - phase: 'cleanup-plan', + cleanCtx.logger.debug('Cleanup planning complete', { filesToDelete: plan.filesToDelete.length, dirsToDelete: plan.dirsToDelete.length + plan.emptyDirsToDelete.length, emptyDirsToDelete: plan.emptyDirsToDelete.length, @@ -663,11 +660,10 @@ export async function performCleanup( readonly OutputFileDeclaration[] > ): Promise { - logger.info('cleanup execution started', { - phase: 'cleanup-execute', + logger.debug('Cleanup execution started', { dryRun: cleanCtx.dryRun === true, - pluginCount: outputPlugins.length, - workspaceDir: cleanCtx.collectedOutputContext.workspace.directory.path + plugins: outputPlugins.length, + workspace: cleanCtx.collectedOutputContext.workspace.directory.path }) if (predeclaredOutputs != null) { const outputs = await collectAllPluginOutputs( @@ -675,8 +671,7 @@ export async function performCleanup( cleanCtx, predeclaredOutputs ) - 
logger.info('cleanup outputs collected', { - phase: 'cleanup-execute', + logger.debug('Cleanup outputs collected', { projectDirs: outputs.projectDirs.length, projectFiles: outputs.projectFiles.length, globalDirs: outputs.globalDirs.length, @@ -689,12 +684,10 @@ export async function performCleanup( cleanCtx, predeclaredOutputs ) - logger.info('cleanup snapshot prepared', { - phase: 'cleanup-execute', + logger.debug('Cleanup snapshot prepared', { ...summarizeCleanupSnapshot(snapshot) }) - logger.info('cleanup native execution started', { - phase: 'cleanup-execute', + logger.debug('Cleanup native execution started', { pluginCount: snapshot.pluginSnapshots.length, outputCount: snapshot.pluginSnapshots.reduce( (total, plugin) => total + plugin.outputs.length, @@ -705,8 +698,7 @@ export async function performCleanup( await performCleanupWithNative(snapshot), collectExactSafeFilePaths(snapshot) ) - logger.info('cleanup native execution finished', { - phase: 'cleanup-execute', + logger.debug('Cleanup native execution finished', { deletedFiles: result.deletedFiles, deletedDirs: result.deletedDirs, plannedFiles: result.filesToDelete.length, @@ -721,8 +713,7 @@ export async function performCleanup( if (result.conflicts.length > 0) { logCleanupProtectionConflicts(logger, result.conflicts) - logger.info('cleanup execution blocked', { - phase: 'cleanup-execute', + logger.debug('Cleanup execution blocked', { reason: 'conflicts', conflicts: result.conflicts.length }) @@ -738,8 +729,7 @@ export async function performCleanup( if (result.violations.length > 0) { logProtectedDeletionGuardError(logger, 'cleanup', result.violations) - logger.info('cleanup execution blocked', { - phase: 'cleanup-execute', + logger.debug('Cleanup execution blocked', { reason: 'protected-path-violations', violations: result.violations.length }) @@ -768,8 +758,7 @@ export async function performCleanup( deletedDirs: result.deletedDirs, errors: loggedErrors.length }) - logger.info('cleanup execution 
complete', { - phase: 'cleanup-execute', + logger.debug('Cleanup execution complete', { deletedFiles, deletedDirs: result.deletedDirs, errors: loggedErrors.length diff --git a/sdk/src/wsl-mirror-sync.ts b/sdk/src/wsl-mirror-sync.ts index c9d43fce..3d556f21 100644 --- a/sdk/src/wsl-mirror-sync.ts +++ b/sdk/src/wsl-mirror-sync.ts @@ -273,7 +273,7 @@ function discoverWslInstances( } const discoveredInstances = parseWslInstanceList(getSpawnOutputText(listResult.stdout)) - logger.info('discovered wsl instances', { + logger.debug('Discovered WSL instances', { instances: discoveredInstances }) return discoveredInstances @@ -425,7 +425,7 @@ export function resolveWslInstanceTargets( ) } - logger.info('resolved wsl instance home', { + logger.debug('Resolved WSL instance home', { instance, linuxHomeDir, windowsHomeDir @@ -470,7 +470,7 @@ function syncResolvedMirrorSourcesIntoCurrentWslHome( const targetPath = path.posix.join(nativeHomeDir, ...source.relativePathSegments) try { if (ctx.dryRun === true) { - ctx.logger.info('would mirror host config into wsl runtime home', { + ctx.logger.debug('Prepared WSL mirror preview for current runtime home', { sourcePath: source.sourcePath, targetPath, dryRun: true @@ -479,7 +479,7 @@ function syncResolvedMirrorSourcesIntoCurrentWslHome( const content = fsImpl.readFileSync(source.sourcePath) fsImpl.mkdirSync(path.posix.dirname(targetPath), {recursive: true}) fsImpl.writeFileSync(targetPath, content) - ctx.logger.info('mirrored host config into wsl runtime home', { + ctx.logger.debug('Mirrored host config into the current WSL runtime home', { sourcePath: source.sourcePath, targetPath }) @@ -571,7 +571,7 @@ export async function syncWindowsConfigIntoWsl( } catch (error) { if (error instanceof WslUnavailableError) { - ctx.logger.info('wsl is unavailable, skipping WSL mirror sync', { + ctx.logger.debug('WSL is unavailable, skipping mirror sync', { reason: error.message }) return { @@ -623,7 +623,7 @@ export async function 
syncWindowsConfigIntoWsl( try { if (ctx.dryRun === true) { - ctx.logger.info('would mirror windows config into wsl', { + ctx.logger.debug('Prepared WSL mirror preview', { instance: resolvedTarget.instance, sourcePath, targetPath, @@ -633,7 +633,7 @@ export async function syncWindowsConfigIntoWsl( const content = fsImpl.readFileSync(sourcePath) fsImpl.mkdirSync(path.win32.dirname(targetPath), {recursive: true}) fsImpl.writeFileSync(targetPath, content) - ctx.logger.info('mirrored windows config into wsl', { + ctx.logger.debug('Mirrored Windows config into WSL', { instance: resolvedTarget.instance, sourcePath, targetPath From 85ce4fdf4e6db1db4bf336badc0ceebd0a0ce970 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E8=B5=B5=E6=97=A5=E5=A4=A9?= Date: Fri, 10 Apr 2026 01:21:09 +0800 Subject: [PATCH 02/29] Rename execute command to install across CLI and GUI --- README.md | 4 ++-- cli/scripts/sync-sdk-dist.ts | 2 +- cli/src/cli.rs | 18 ++++++++++++++---- cli/src/commands/HelpCommand.ts | 4 +++- .../{ExecuteCommand.ts => InstallCommand.ts} | 4 ++-- cli/src/commands/JsonOutputCommand.ts | 3 ++- cli/src/commands/bridge.rs | 4 ++-- cli/src/commands/execution-routing.test.ts | 8 ++++---- .../factories/ExecuteCommandFactory.ts | 13 ------------- .../factories/InstallCommandFactory.ts | 14 ++++++++++++++ cli/src/commands/help.rs | 3 ++- cli/src/main.rs | 4 ++-- cli/src/pipeline/CliArgumentParser.test.ts | 5 +++++ cli/src/pipeline/CliArgumentParser.ts | 6 ++++-- cli/src/plugin-runtime.ts | 12 ++++++------ cli/src/plugin.config.ts | 3 ++- doc/content/cli/_meta.ts | 2 +- doc/content/cli/cli-commands.mdx | 3 ++- doc/content/cli/first-sync.mdx | 14 +++++++------- doc/content/cli/index.mdx | 8 ++++---- doc/content/cli/install.mdx | 7 ++++--- doc/content/cli/workspace-setup.mdx | 2 +- doc/content/gui/index.mdx | 6 +++--- doc/content/gui/workflows-and-pages.mdx | 4 ++-- doc/content/index.mdx | 4 ++-- doc/content/quick-guide/aindex-and-config.mdx | 2 +- doc/content/quick-guide/index.mdx | 6 
+++--- doc/content/quick-guide/quick-install.mdx | 4 ++-- .../documentation-components.mdx | 4 ++-- doc/lib/site.ts | 8 ++++---- gui/src-tauri/src/commands.rs | 10 +++++----- gui/src-tauri/src/lib.rs | 2 +- gui/src-tauri/src/tray.rs | 14 +++++++------- gui/src/api/bridge.test.ts | 16 ++++++++-------- gui/src/api/bridge.ts | 4 ++-- gui/src/hooks/usePipeline.ts | 12 ++++++------ gui/src/i18n/en-US.json | 4 ++-- gui/src/i18n/zh-CN.json | 4 ++-- gui/src/pages/DashboardPage.tsx | 6 +++--- gui/src/pages/PipelinePage.tsx | 6 +++--- libraries/logger/src/lib.rs | 4 ++-- libraries/script-runtime/src/index.test.ts | 4 ++-- libraries/script-runtime/src/types.ts | 2 +- sdk/src/bridge/node.rs | 2 +- sdk/src/config.test.ts | 6 +++--- sdk/src/diagnostics.ts | 3 ++- sdk/src/lib.rs | 12 ++++++------ .../plugin-core/AbstractOutputPlugin.ts | 2 +- sdk/src/public-config-paths.ts | 4 ++-- sdk/src/runtime-command.ts | 2 +- sdk/test/native-binding/cleanup.ts | 2 +- 51 files changed, 164 insertions(+), 138 deletions(-) rename cli/src/commands/{ExecuteCommand.ts => InstallCommand.ts} (97%) delete mode 100644 cli/src/commands/factories/ExecuteCommandFactory.ts create mode 100644 cli/src/commands/factories/InstallCommandFactory.ts diff --git a/README.md b/README.md index fcf40e8b..0685437e 100644 --- a/README.md +++ b/README.md @@ -68,7 +68,7 @@ Because native targets still differ, and because conditional prompt authoring st **Is there anything in the prompt or generated output that I may not want to leave behind?** -Yes. That is why `memory-sync` gives you cleanup and protection boundaries. After sync, only the managed outputs you explicitly allow should remain. Anything else should either be cleaned or protected on purpose. Prompts and derived artifacts should stay computable, auditable, and residue-free. +Yes. That is why `memory-sync` gives you cleanup and protection boundaries. After install, only the managed outputs you explicitly allow should remain. 
Anything else should either be cleaned or protected on purpose. Prompts and derived artifacts should stay computable, auditable, and residue-free. ## Who is this for @@ -117,4 +117,4 @@ In other words: - [zjarlin](https://github.com/zjarlin) ## License -[AGPL-3.0](LICENSE) \ No newline at end of file +[AGPL-3.0](LICENSE) diff --git a/cli/scripts/sync-sdk-dist.ts b/cli/scripts/sync-sdk-dist.ts index 3bb729c3..bbc3e612 100644 --- a/cli/scripts/sync-sdk-dist.ts +++ b/cli/scripts/sync-sdk-dist.ts @@ -116,7 +116,7 @@ function smokeTestScriptRuntimeWorker(): void { cwd: tempDir, workspaceDir: tempDir, aindexDir: join(tempDir, '.aindex'), - command: 'execute', + command: 'install', platform: process.platform }), 'utf8' diff --git a/cli/src/cli.rs b/cli/src/cli.rs index 08c8092e..9b0e02bc 100644 --- a/cli/src/cli.rs +++ b/cli/src/cli.rs @@ -45,6 +45,9 @@ pub enum CliCommand { /// Show version information Version, + /// Run the install pipeline + Install, + /// Preview changes without writing files #[command(name = "dry-run")] DryRun, @@ -140,7 +143,7 @@ pub fn resolve_log_level(cli: &Cli) -> Option { pub enum ResolvedCommand { Help, Version, - Execute, + Install, DryRun, Clean, DryRunClean, @@ -150,9 +153,10 @@ pub enum ResolvedCommand { /// Resolve the command to execute from parsed CLI args. 
pub fn resolve_command(cli: &Cli) -> ResolvedCommand { match &cli.command { - None => ResolvedCommand::Execute, + None => ResolvedCommand::Install, Some(CliCommand::Help) => ResolvedCommand::Help, Some(CliCommand::Version) => ResolvedCommand::Version, + Some(CliCommand::Install) => ResolvedCommand::Install, Some(CliCommand::DryRun) => ResolvedCommand::DryRun, Some(CliCommand::Clean(args)) => { if args.dry_run { @@ -171,9 +175,15 @@ mod tests { use clap::Parser; #[test] - fn resolve_command_defaults_to_execute() { + fn resolve_command_defaults_to_install() { let cli = Cli::parse_from(["tnmsc"]); - assert_eq!(resolve_command(&cli), ResolvedCommand::Execute); + assert_eq!(resolve_command(&cli), ResolvedCommand::Install); + } + + #[test] + fn resolve_command_parses_install() { + let cli = Cli::parse_from(["tnmsc", "install"]); + assert_eq!(resolve_command(&cli), ResolvedCommand::Install); } #[test] diff --git a/cli/src/commands/HelpCommand.ts b/cli/src/commands/HelpCommand.ts index 841b98e2..9aa0ace1 100644 --- a/cli/src/commands/HelpCommand.ts +++ b/cli/src/commands/HelpCommand.ts @@ -11,9 +11,10 @@ Synchronize AI memory and configuration files across projects. ## Usage -- \`${CLI_NAME}\` runs the sync pipeline. +- \`${CLI_NAME}\` runs the default install pipeline. - \`${CLI_NAME} help\` shows this help message. - \`${CLI_NAME} version\` shows the CLI version. +- \`${CLI_NAME} install\` runs the install pipeline explicitly. - \`${CLI_NAME} dry-run\` previews what would be written. - \`${CLI_NAME} clean\` removes generated files. - \`${CLI_NAME} clean --dry-run\` previews what would be cleaned. @@ -22,6 +23,7 @@ Synchronize AI memory and configuration files across projects. - \`help\` shows this help message. - \`version\` shows version information. +- \`install\` runs the install pipeline. - \`dry-run\` previews changes without writing files. - \`clean\` removes generated output files and directories. 
diff --git a/cli/src/commands/ExecuteCommand.ts b/cli/src/commands/InstallCommand.ts similarity index 97% rename from cli/src/commands/ExecuteCommand.ts rename to cli/src/commands/InstallCommand.ts index 31d85af5..c9f56f8a 100644 --- a/cli/src/commands/ExecuteCommand.ts +++ b/cli/src/commands/InstallCommand.ts @@ -2,8 +2,8 @@ import type {Command, CommandContext, CommandResult} from './Command' import {collectOutputDeclarations, executeDeclarativeWriteOutputs, performCleanup, syncWindowsConfigIntoWsl} from '@truenine/memory-sync-sdk' import {runExecutionPreflight} from './execution-preflight' -export class ExecuteCommand implements Command { - readonly name = 'execute' +export class InstallCommand implements Command { + readonly name = 'install' async execute(ctx: CommandContext): Promise { const preflightResult = runExecutionPreflight(ctx, this.name) diff --git a/cli/src/commands/JsonOutputCommand.ts b/cli/src/commands/JsonOutputCommand.ts index 9715686b..a3b362f0 100644 --- a/cli/src/commands/JsonOutputCommand.ts +++ b/cli/src/commands/JsonOutputCommand.ts @@ -1,9 +1,10 @@ +import type {LoggerDiagnosticRecord} from '@truenine/memory-sync-sdk' import type {Command, CommandContext, CommandResult} from './Command' import process from 'node:process' import { clearBufferedDiagnostics, drainBufferedDiagnostics, - type LoggerDiagnosticRecord, + partitionBufferedDiagnostics } from '@truenine/memory-sync-sdk' diff --git a/cli/src/commands/bridge.rs b/cli/src/commands/bridge.rs index da2340b2..fe1deb35 100644 --- a/cli/src/commands/bridge.rs +++ b/cli/src/commands/bridge.rs @@ -1,7 +1,7 @@ use std::process::ExitCode; -pub fn execute() -> ExitCode { - tnmsc::bridge::node::run_node_command("execute", &[]) +pub fn install() -> ExitCode { + tnmsc::bridge::node::run_node_command("install", &[]) } pub fn dry_run() -> ExitCode { diff --git a/cli/src/commands/execution-routing.test.ts b/cli/src/commands/execution-routing.test.ts index db2e7cbc..984cd7cb 100644 --- 
a/cli/src/commands/execution-routing.test.ts +++ b/cli/src/commands/execution-routing.test.ts @@ -5,7 +5,7 @@ import {createLogger, FilePathKind, mergeConfig} from '@truenine/memory-sync-sdk import {afterEach, describe, expect, it, vi} from 'vitest' import {CleanCommand} from './CleanCommand' import {DryRunCleanCommand} from './DryRunCleanCommand' -import {ExecuteCommand} from './ExecuteCommand' +import {InstallCommand} from './InstallCommand' function createEmptyProjectsBySeries() { return { @@ -108,8 +108,8 @@ afterEach(() => { }) describe('execution-aware command routing', () => { - it('short-circuits execute when cwd is unsupported inside workspace', async () => { - const workspaceDir = path.resolve('/tmp/tnmsc-execute-unsupported') + it('short-circuits install when cwd is unsupported inside workspace', async () => { + const workspaceDir = path.resolve('/tmp/tnmsc-install-unsupported') const {ctx} = createBaseContext({ scope: 'unsupported', cwd: path.join(workspaceDir, 'scripts'), @@ -118,7 +118,7 @@ describe('execution-aware command routing', () => { managedProjects: [] }) - const result = await new ExecuteCommand().execute(ctx) + const result = await new InstallCommand().execute(ctx) expect(result.success).toBe(false) expect(result.message).toContain('not managed by tnmsc') diff --git a/cli/src/commands/factories/ExecuteCommandFactory.ts b/cli/src/commands/factories/ExecuteCommandFactory.ts deleted file mode 100644 index 681b3447..00000000 --- a/cli/src/commands/factories/ExecuteCommandFactory.ts +++ /dev/null @@ -1,13 +0,0 @@ -import type {Command} from '../Command' -import type {CommandFactory} from '../CommandFactory' -import {ExecuteCommand} from '../ExecuteCommand' - -export class ExecuteCommandFactory implements CommandFactory { - canHandle(): boolean { - return true - } - - createCommand(): Command { - return new ExecuteCommand() - } -} diff --git a/cli/src/commands/factories/InstallCommandFactory.ts 
b/cli/src/commands/factories/InstallCommandFactory.ts new file mode 100644 index 00000000..ea3bfd0e --- /dev/null +++ b/cli/src/commands/factories/InstallCommandFactory.ts @@ -0,0 +1,14 @@ +import type {Command} from '../Command' +import type {CommandFactory} from '../CommandFactory' +import type {ParsedCliArgs} from '@/pipeline/CliArgumentParser' +import {InstallCommand} from '../InstallCommand' + +export class InstallCommandFactory implements CommandFactory { + canHandle(args: ParsedCliArgs): boolean { + return args.subcommand == null || args.subcommand === 'install' + } + + createCommand(): Command { + return new InstallCommand() + } +} diff --git a/cli/src/commands/help.rs b/cli/src/commands/help.rs index e22f4143..cc175f54 100644 --- a/cli/src/commands/help.rs +++ b/cli/src/commands/help.rs @@ -7,7 +7,8 @@ pub fn execute() -> ExitCode { println!(" tnmsc [OPTIONS] [COMMAND]"); println!(); println!("COMMANDS:"); - println!(" (default) Sync AI memory and configuration files"); + println!(" (default) Run the default install pipeline"); + println!(" install Run the install pipeline explicitly"); println!(" dry-run Preview changes without writing files"); println!(" clean Remove all generated output files"); println!(" plugins List all registered plugins"); diff --git a/cli/src/main.rs b/cli/src/main.rs index 5e1199de..02e2d288 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -1,7 +1,7 @@ //! tnmsc — Rust CLI shell entry point. //! //! Pure Rust commands: help, version -//! Bridge commands (Node.js): execute, dry-run, clean, plugins +//! 
Bridge commands (Node.js): install, dry-run, clean, plugins mod cli; mod commands; @@ -25,7 +25,7 @@ fn main() -> ExitCode { let exit_code = match command { ResolvedCommand::Help => commands::help::execute(), ResolvedCommand::Version => commands::version::execute(), - ResolvedCommand::Execute => commands::bridge::execute(), + ResolvedCommand::Install => commands::bridge::install(), ResolvedCommand::DryRun => commands::bridge::dry_run(), ResolvedCommand::Clean => commands::bridge::clean(), ResolvedCommand::DryRunClean => commands::bridge::dry_run_clean(), diff --git a/cli/src/pipeline/CliArgumentParser.test.ts b/cli/src/pipeline/CliArgumentParser.test.ts index eff6fc48..d620f6fb 100644 --- a/cli/src/pipeline/CliArgumentParser.test.ts +++ b/cli/src/pipeline/CliArgumentParser.test.ts @@ -2,6 +2,11 @@ import {describe, expect, it} from 'vitest' import {parseArgs, resolveCommand} from './CliArgumentParser' describe('cli argument parser', () => { + it('resolves the install subcommand to InstallCommand', () => { + const command = resolveCommand(parseArgs(['install'])) + expect(command.name).toBe('install') + }) + it('resolves the dry-run subcommand to DryRunOutputCommand', () => { const command = resolveCommand(parseArgs(['dry-run'])) expect(command.name).toBe('dry-run-output') diff --git a/cli/src/pipeline/CliArgumentParser.ts b/cli/src/pipeline/CliArgumentParser.ts index 61108a6d..fa955e02 100644 --- a/cli/src/pipeline/CliArgumentParser.ts +++ b/cli/src/pipeline/CliArgumentParser.ts @@ -3,8 +3,8 @@ import {FactoryPriority} from '@/commands/CommandFactory' import {CommandRegistry} from '@/commands/CommandRegistry' import {CleanCommandFactory} from '@/commands/factories/CleanCommandFactory' import {DryRunCommandFactory} from '@/commands/factories/DryRunCommandFactory' -import {ExecuteCommandFactory} from '@/commands/factories/ExecuteCommandFactory' import {HelpCommandFactory} from '@/commands/factories/HelpCommandFactory' +import {InstallCommandFactory} from 
'@/commands/factories/InstallCommandFactory' import {PluginsCommandFactory} from '@/commands/factories/PluginsCommandFactory' import {UnknownCommandFactory} from '@/commands/factories/UnknownCommandFactory' import {VersionCommandFactory} from '@/commands/factories/VersionCommandFactory' @@ -12,6 +12,7 @@ import {VersionCommandFactory} from '@/commands/factories/VersionCommandFactory' export type Subcommand = | 'help' | 'version' + | 'install' | 'dry-run' | 'clean' | 'plugins' @@ -31,6 +32,7 @@ export interface ParsedCliArgs { const VALID_SUBCOMMANDS: ReadonlySet = new Set([ 'help', 'version', + 'install', 'dry-run', 'clean', 'plugins' @@ -206,7 +208,7 @@ function createDefaultCommandRegistry(): CommandRegistry { FactoryPriority.Subcommand ) registry.registerWithPriority( - new ExecuteCommandFactory(), + new InstallCommandFactory(), FactoryPriority.Subcommand ) return registry diff --git a/cli/src/plugin-runtime.ts b/cli/src/plugin-runtime.ts index 7c82f624..e442bbc6 100644 --- a/cli/src/plugin-runtime.ts +++ b/cli/src/plugin-runtime.ts @@ -16,7 +16,7 @@ import { import {CleanCommand} from '@/commands/CleanCommand' import {DryRunCleanCommand} from '@/commands/DryRunCleanCommand' import {DryRunOutputCommand} from '@/commands/DryRunOutputCommand' -import {ExecuteCommand} from '@/commands/ExecuteCommand' +import {InstallCommand} from '@/commands/InstallCommand' import {JsonOutputCommand, toJsonCommandResult} from '@/commands/JsonOutputCommand' import {PluginsCommand} from '@/commands/PluginsCommand' import {createDefaultPluginConfig} from './plugin.config' @@ -29,7 +29,7 @@ function parseRuntimeArgs(argv: string[]): { dryRun: boolean } { const args = argv.slice(2) - let subcommand: RuntimeCommand = 'execute' + let subcommand: RuntimeCommand = 'install' let bridgeJson = false let dryRun = false for (const arg of args) { @@ -37,9 +37,9 @@ function parseRuntimeArgs(argv: string[]): { else if (arg === '--dry-run' || arg === '-n') dryRun = true else if 
(!arg.startsWith('-')) { subcommand - = arg === 'plugins' || arg === 'clean' || arg === 'dry-run' + = new Set(['install', 'plugins', 'clean', 'dry-run']).has(arg) ? arg - : 'execute' + : 'install' } } return {subcommand, bridgeJson, dryRun} @@ -50,8 +50,8 @@ function resolveRuntimeCommand( dryRun: boolean ): Command { switch (subcommand) { - case 'execute': - return new ExecuteCommand() + case 'install': + return new InstallCommand() case 'dry-run': return new DryRunOutputCommand() case 'clean': diff --git a/cli/src/plugin.config.ts b/cli/src/plugin.config.ts index 47740236..363bfecb 100644 --- a/cli/src/plugin.config.ts +++ b/cli/src/plugin.config.ts @@ -31,10 +31,11 @@ export function resolveRuntimeCommandFromArgv(argv: readonly string[] = process. const args = argv.filter((arg): arg is string => arg != null) const userArgs = args.slice(2) const subcommand = userArgs.find(arg => !arg.startsWith('-')) + if (subcommand === 'install') return 'install' if (subcommand === 'plugins') return 'plugins' if (subcommand === 'clean') return 'clean' if (subcommand === 'dry-run' || userArgs.includes('--dry-run') || userArgs.includes('-n')) return 'dry-run' - return 'execute' + return 'install' } export async function createDefaultPluginConfig( diff --git a/doc/content/cli/_meta.ts b/doc/content/cli/_meta.ts index cf92a996..0184f08e 100644 --- a/doc/content/cli/_meta.ts +++ b/doc/content/cli/_meta.ts @@ -2,7 +2,7 @@ export default { 'index': '概览', 'install': '安装与要求', 'workspace-setup': '工作区与 aindex', - 'first-sync': '第一次同步', + 'first-sync': '第一次安装', 'migration': '从旧文档迁移', 'cli-commands': 'CLI 命令', 'dry-run-and-clean': 'dry-run 与 clean', diff --git a/doc/content/cli/cli-commands.mdx b/doc/content/cli/cli-commands.mdx index 7def5abb..6ca0d8e2 100644 --- a/doc/content/cli/cli-commands.mdx +++ b/doc/content/cli/cli-commands.mdx @@ -11,9 +11,10 @@ status: stable | 命令 | 说明 | | --- | --- | -| `tnmsc` | 运行默认同步流水线 | +| `tnmsc` | 运行默认 install 流水线 | | `tnmsc help` | 显示帮助 | | `tnmsc 
version` | 显示版本 | +| `tnmsc install` | 显式运行 install 流水线 | | `tnmsc dry-run` | 预览将要写入的文件 | | `tnmsc clean` | 删除生成输出,并继续清理项目源码树中的空目录 | | `tnmsc clean --dry-run` | 预览将被清理的内容,包括后续会一并移除的空目录 | diff --git a/doc/content/cli/first-sync.mdx b/doc/content/cli/first-sync.mdx index a20d1897..2632b475 100644 --- a/doc/content/cli/first-sync.mdx +++ b/doc/content/cli/first-sync.mdx @@ -1,24 +1,24 @@ --- -title: 第一次同步 -description: 用最短路径走完 tnmsc help、dry-run、真实同步运行和结果核验。 -sidebarTitle: 第一次同步 +title: 第一次安装流程 +description: 用最短路径走完 tnmsc help、dry-run、tnmsc install 和结果核验。 +sidebarTitle: 第一次安装 status: stable --- -# 第一次同步 +# 第一次安装流程 ## 推荐顺序 1. 先运行 `tnmsc help`,确认你看到的是当前命令集。 2. 然后运行 `tnmsc dry-run`,查看哪些文件将会被写入。 -3. 只有在确认范围之后,才运行默认 sync 流水线。 +3. 只有在确认范围之后,才运行默认 install 流水线。 ## 最短流程 ```sh tnmsc help tnmsc dry-run -tnmsc +tnmsc install ``` ## 为什么不要跳过 `dry-run` @@ -27,7 +27,7 @@ tnmsc 如果你不确定清理风险,先看 [dry-run 与 clean](/docs/cli/dry-run-and-clean) 和 [清理保护](/docs/cli/cleanup-protection)。 -## 同步后要核对什么 +## Install 后要核对什么 - 目标工具是否出现在[支持的输出](/docs/cli/supported-outputs)中 - 实际写入范围是否符合[输出范围](/docs/cli/output-scopes) diff --git a/doc/content/cli/index.mdx b/doc/content/cli/index.mdx index 6c2a71c0..944dfc6a 100644 --- a/doc/content/cli/index.mdx +++ b/doc/content/cli/index.mdx @@ -1,20 +1,20 @@ --- title: CLI -description: 围绕 tnmsc 命令面组织安装、项目准备、同步流程、配置字段与故障排查内容。 +description: 围绕 tnmsc 命令面组织安装、项目准备、install 流程、配置字段与故障排查内容。 sidebarTitle: 概览 status: stable --- # CLI -这一部分围绕公开的 `tnmsc` 命令面展开。像“怎么安装”“怎么准备项目”“怎么运行 sync”以及“某个配置字段到底是什么意思”这类问题,都应该先从这里开始。 +这一部分围绕公开的 `tnmsc` 命令面展开。像“怎么安装”“怎么准备项目”“怎么运行 `tnmsc install`”以及“某个配置字段到底是什么意思”这类问题,都应该先从这里开始。 ## 本节包含什么 - [安装与要求](/docs/cli/install):确认 Node、pnpm、Rust,以及更高版本 GUI 开发引擎的边界。 - [aindex 与 `.tnmsc.json`](/docs/quick-guide/aindex-and-config):在一个页面里准备源目录、配置文件和路径映射。 - [工作区与 aindex](/docs/cli/workspace-setup):理解工作区侧剩余职责与 `plugin.config.ts` 的分工。 -- [第一次同步](/docs/cli/first-sync):按推荐顺序运行 `help`、`dry-run` 和真实写入流程。 +- [第一次安装流程](/docs/cli/first-sync):按推荐顺序运行 `help`、`dry-run` 和 
`tnmsc install`。 - [CLI 命令](/docs/cli/cli-commands):查看 `tnmsc --help` 当前暴露的命令面。 - [dry-run 与 clean](/docs/cli/dry-run-and-clean):先预览,再写入,最后再清理。 - [plugin.config.ts](/docs/cli/plugin-config) 与 [JSON Schema](/docs/cli/schema):核对运行时装配方式和当前 `.tnmsc.json` 字段面。 @@ -26,5 +26,5 @@ status: stable 1. 先看[安装与要求](/docs/cli/install)。 2. 接着看 [aindex 与 `.tnmsc.json`](/docs/quick-guide/aindex-and-config)。 -3. 然后按[第一次同步](/docs/cli/first-sync)完成一次真实运行。 +3. 然后按[第一次安装流程](/docs/cli/first-sync)完成一次真实运行。 4. 需要核对事实时,再回来看 [CLI 命令](/docs/cli/cli-commands) 和 [JSON Schema](/docs/cli/schema)。 diff --git a/doc/content/cli/install.mdx b/doc/content/cli/install.mdx index 9c3b5254..8b13dfc0 100644 --- a/doc/content/cli/install.mdx +++ b/doc/content/cli/install.mdx @@ -46,12 +46,13 @@ pnpm -C cli exec node dist/index.mjs --help CLI help 里当前可见的核心命令有: -- 默认 sync 流水线 +- 默认 install 流水线 +- `install` - `help` - `version` - `dry-run` - `clean` -- `config key=value` +- `plugins` ## 首次检查 @@ -61,4 +62,4 @@ CLI help 里当前可见的核心命令有: tnmsc help ``` -你应该能看到 `dry-run`、`clean` 和 `config`。如果实际看到的不是这些,就先停在这里,不要继续照着后面的文档做。 +你应该能看到 `install`、`dry-run`、`clean` 和 `plugins`。如果实际看到的不是这些,就先停在这里,不要继续照着后面的文档做。 diff --git a/doc/content/cli/workspace-setup.mdx b/doc/content/cli/workspace-setup.mdx index e7d40070..0b4be35a 100644 --- a/doc/content/cli/workspace-setup.mdx +++ b/doc/content/cli/workspace-setup.mdx @@ -40,4 +40,4 @@ status: stable ## 下一步 -目录准备好之后,继续看[第一次同步](/docs/cli/first-sync)。在真实写入前,先用 `dry-run` 校验输出范围。 +目录准备好之后,继续看[第一次安装流程](/docs/cli/first-sync)。在真实写入前,先用 `dry-run` 校验输出范围。 diff --git a/doc/content/gui/index.mdx b/doc/content/gui/index.mdx index fa4c2dd5..05b39168 100644 --- a/doc/content/gui/index.mdx +++ b/doc/content/gui/index.mdx @@ -14,18 +14,18 @@ status: stable > > 等核心功能更完整之后,我预计会回来继续维护 GUI。 -`gui/` 是基于 Tauri 和 React 构建的桌面调用层。它的职责不是变成系统架构的中心,而是把 `sdk/` 中 `tnmsc` crate 暴露出来的配置编辑、执行、展示和日志检查能力组织成桌面工作流。 +`gui/` 是基于 Tauri 和 React 构建的桌面调用层。它的职责不是变成系统架构的中心,而是把 `sdk/` 中 `tnmsc` crate 
暴露出来的配置编辑、install、展示和日志检查能力组织成桌面工作流。 ## 这一层负责什么 -- 触发 sync、`dry-run` 和 cleanup +- 触发 `install`、`dry-run` 和 cleanup - 编辑或展示 config - 浏览文件、plugin 结果和日志 - 提供基于页面的桌面工作流 ## 这一层不负责什么 -- 它不会重新实现 sync core +- 它不会重新实现 install core - 它不会在前端重新推导一套 CLI 规则 - 它不会改变长期坚持的 Rust-first / NAPI-first 方向 diff --git a/doc/content/gui/workflows-and-pages.mdx b/doc/content/gui/workflows-and-pages.mdx index cd61d19e..4a21443d 100644 --- a/doc/content/gui/workflows-and-pages.mdx +++ b/doc/content/gui/workflows-and-pages.mdx @@ -22,14 +22,14 @@ status: stable ## 这些页面分别表示什么 - Dashboard:统计信息、快捷操作,以及受支持工具概览 -- Pipeline:运行 sync 和 `dry-run`,然后检查 plugin 结果与错误 +- Pipeline:运行 `install` 和 `dry-run`,然后检查 plugin 结果与错误 - Config:查看或编辑配置 - Plugins / Files / Logs:plugin 结果、文件视图和日志检查 - Settings:桌面端偏好设置与配置项 ## 与 CLI 的关系 -桌面页面并不会定义另一套独立的 sync 规则。它们主要是通过 bridge layer 调用更底层的能力,并把命令式流程转换成页面式工作流。 +桌面页面并不会定义另一套独立的 install 规则。它们主要是通过 bridge layer 调用更底层的能力,并把命令式流程转换成页面式工作流。 所以当你遇到下面这些问题时,仍然应该优先回到 CLI 文档: diff --git a/doc/content/index.mdx b/doc/content/index.mdx index af0dcf70..be7688ff 100644 --- a/doc/content/index.mdx +++ b/doc/content/index.mdx @@ -18,7 +18,7 @@ keywords: | 部分 | 核心问题 | 入口 | | --- | --- | --- | | 快速指南 | 我应该从 CLI、GUI 还是 MCP 开始,每条路径最短的起步方式是什么? | [快速指南](/docs/quick-guide) | -| CLI | 我该如何安装、准备项目、运行同步,并理解命令和配置字段? | [CLI](/docs/cli) | +| CLI | 我该如何安装、准备项目、运行 `tnmsc install`,并理解命令和配置字段? | [CLI](/docs/cli) | | SDK | 为什么 `sdk/` 是混合核心,它负责什么,内部使用方应该如何依赖它? | [SDK](/docs/sdk) | | MCP | `memory-sync-mcp` 是什么,它暴露了哪些工具,应该如何集成? | [MCP](/docs/mcp) | | GUI | 桌面层负责什么、有哪些页面,以及它如何与 `sdk/`、`tnmsc` crate 和 CLI 协作? 
| [GUI](/docs/gui) | @@ -28,7 +28,7 @@ keywords: ## 从哪里开始 - 如果你是第一次打开这个文档站,先看 [快速指南](/docs/quick-guide),判断自己需要走 CLI、GUI 还是 MCP 路径。 -- 如果你想立刻把 `memory-sync` 用起来,继续进入 [CLI](/docs/cli),先把安装、项目准备和第一次同步跑通。 +- 如果你想立刻把 `memory-sync` 用起来,继续进入 [CLI](/docs/cli),先把安装、项目准备和第一次安装流程跑通。 - 如果你需要理解内部核心是如何拆分的,以及为什么 `sdk/` 现在成了共享中心,就看 [SDK](/docs/sdk)。 - 如果你想把 `memory-sync-mcp` 集成到支持 MCP 的宿主里,直接跳到 [MCP](/docs/mcp)。 - 如果你更关心桌面应用而不是终端界面,就打开 [GUI](/docs/gui)。 diff --git a/doc/content/quick-guide/aindex-and-config.mdx b/doc/content/quick-guide/aindex-and-config.mdx index 913f814c..f52737f0 100644 --- a/doc/content/quick-guide/aindex-and-config.mdx +++ b/doc/content/quick-guide/aindex-and-config.mdx @@ -281,6 +281,6 @@ aindex 内容树位于: ## 接下来读什么 -- 如果你想了解命令工作流,继续看[第一次同步](/docs/cli/first-sync) +- 如果你想了解命令工作流,继续看[第一次安装流程](/docs/cli/first-sync) - 如果你需要插件装配说明,阅读 [plugin.config.ts](/docs/cli/plugin-config) - 如果你需要清理或输出边界行为,继续看 [CLI](/docs/cli) diff --git a/doc/content/quick-guide/index.mdx b/doc/content/quick-guide/index.mdx index e37937cf..f9a75b8a 100644 --- a/doc/content/quick-guide/index.mdx +++ b/doc/content/quick-guide/index.mdx @@ -33,8 +33,8 @@ flowchart LR; | 你的目标 | 去哪里 | 原因 | | --- | --- | --- | -| 在终端里同步 prompts、rules、skills、commands 或 project memory | [CLI](/docs/cli) | 真实的命令界面、schema、输出范围和清理边界都在那里核实。 | -| 在桌面应用里编辑配置、触发执行并查看日志 | [GUI](/docs/gui) | `gui/` 负责桌面工作流,但执行仍依赖 `sdk/` 中的 `tnmsc` crate。 | +| 在终端里运行 `tnmsc install` 来分发 prompts、rules、skills、commands 或 project memory | [CLI](/docs/cli) | 真实的命令界面、schema、输出范围和清理边界都在那里核实。 | +| 在桌面应用里编辑配置、触发 install 并查看日志 | [GUI](/docs/gui) | `gui/` 负责桌面工作流,但 install 仍依赖 `sdk/` 中的 `tnmsc` crate。 | | 把 `memory-sync-mcp` 连接到支持 MCP 的宿主 | [MCP](/docs/mcp) | 这一部分重点说明 stdio 服务端、工具列表和 `workspaceDir` 语义。 | | 在使用任何东西之前先理解仓库架构 | [SDK](/docs/sdk) 和 [技术细节](/docs/technical-details) | 前者解释混合核心边界,后者解释事实来源模型和同步流水线。 | @@ -52,7 +52,7 @@ flowchart LR; 1. 阅读 [安装与要求](/docs/cli/install)。 2. 接着看 [aindex 与 `.tnmsc.json`](/docs/quick-guide/aindex-and-config)。 -3. 
然后使用 [第一次同步](/docs/cli/first-sync) 实际跑通一次完整流程。 +3. 然后使用 [第一次安装流程](/docs/cli/first-sync) 实际跑通一次完整流程。 ### 如果你从 GUI 开始 diff --git a/doc/content/quick-guide/quick-install.mdx b/doc/content/quick-guide/quick-install.mdx index fc1b8333..c58cac80 100644 --- a/doc/content/quick-guide/quick-install.mdx +++ b/doc/content/quick-guide/quick-install.mdx @@ -28,7 +28,7 @@ keywords: tnmsc help ``` -你应该能看到 `dry-run`、`clean` 和 `config`。 +你应该能看到 `install`、`dry-run`、`clean` 和 `plugins`。 ## 本地 monorepo 开发 @@ -44,6 +44,6 @@ pnpm -C cli exec node dist/index.mjs --help ## 接下来读什么 -- 如果你想把真实流程完整跑一遍,继续看 [第一次同步](/docs/cli/first-sync)。 +- 如果你想把真实流程完整跑一遍,继续看 [第一次安装流程](/docs/cli/first-sync)。 - 如果你还需要 Node、pnpm 和 Rust 的版本要求,继续看 [安装与要求](/docs/cli/install)。 - 如果你还没决定走 CLI、GUI 还是 MCP,回到 [快速指南](/docs/quick-guide)。 diff --git a/doc/content/technical-details/documentation-components.mdx b/doc/content/technical-details/documentation-components.mdx index 1ae3fb6f..47d9ee1c 100644 --- a/doc/content/technical-details/documentation-components.mdx +++ b/doc/content/technical-details/documentation-components.mdx @@ -17,9 +17,9 @@ status: stable items={[ { tool: "CLI", - summary: "负责 sync 与 clean 执行的用户入口。", + summary: "负责 install 与 clean 执行的用户入口。", status: "stable", - capabilities: ["运行默认 sync 流水线", "支持 dry-run / clean / config", "暴露 npm 与 crate 入口"], + capabilities: ["运行默认 install 流水线", "支持 install / dry-run / clean / plugins", "暴露 npm 与 crate 入口"], surfaces: ["tnmsc", "pnpm -C cli exec node dist/index.mjs --help"], notes: "对用户来说,真正的执行入口仍然是 CLI,而不是文档站或 GUI。", }, diff --git a/doc/lib/site.ts b/doc/lib/site.ts index c3b3878a..2a07ba96 100644 --- a/doc/lib/site.ts +++ b/doc/lib/site.ts @@ -68,7 +68,7 @@ export const homeEntryCards = [ { href: '/docs/cli', title: 'CLI', - detail: 'Organized around installation, project setup, the first sync run, configuration fields, and the exposed command surface.' 
+ detail: 'Organized around installation, project setup, the first install run, configuration fields, and the exposed command surface.' }, { href: '/docs/sdk', @@ -88,7 +88,7 @@ export const homeEntryCards = [ { href: '/docs/technical-details', title: 'Technical Details', - detail: 'Concentrates the architecture boundaries, the sync pipeline, the source-of-truth model, and authoring conventions.' + detail: 'Concentrates the architecture boundaries, the install pipeline, the source-of-truth model, and authoring conventions.' }, { href: '/docs/design-rationale', @@ -126,7 +126,7 @@ export const readingPath = [ step: '01', href: '/docs/quick-guide', title: 'Choose Your Entry Point', - description: 'Decide whether you are starting from terminal sync, the desktop workflow, or MCP integration.' + description: 'Decide whether you are starting from terminal install, the desktop workflow, or MCP integration.' }, { step: '02', @@ -149,7 +149,7 @@ export const readingPath = [ { step: '05', href: '/docs/cli/cli-commands', - title: 'Run dry-run and Sync', + title: 'Run dry-run and Install', description: 'Validate the output list, scope, and cleanup behavior before writing into target tools.' } ] as const diff --git a/gui/src-tauri/src/commands.rs b/gui/src-tauri/src/commands.rs index 2c446642..899947c5 100644 --- a/gui/src-tauri/src/commands.rs +++ b/gui/src-tauri/src/commands.rs @@ -1,7 +1,7 @@ /// Tauri commands that bridge the frontend to the `tnmsc` CLI. /// /// Commands use the `tnmsc` crate's library API for direct in-process invocation. -/// Bridge commands (execute, dry-run, clean, plugins) still spawn a Node.js subprocess +/// Bridge commands (install, dry-run, clean, plugins) still spawn a Node.js subprocess /// internally via `tnmsc::run_bridge_command`, but the GUI no longer searches for or /// invokes the CLI binary as a sidecar. 
use std::path::{Path, PathBuf}; @@ -98,10 +98,10 @@ struct PluginListEntry { // Tauri commands // --------------------------------------------------------------------------- -/// Execute the sync pipeline (default command) or dry-run. +/// Execute the install pipeline (default command) or dry-run. #[tauri::command] -pub fn execute_pipeline(cwd: String, dry_run: bool) -> Result { - let subcommand = if dry_run { "dry-run" } else { "execute" }; +pub fn install_pipeline(cwd: String, dry_run: bool) -> Result { + let subcommand = if dry_run { "dry-run" } else { "install" }; let result = tnmsc::run_bridge_command(subcommand, Path::new(&cwd), &[INTERNAL_BRIDGE_JSON_FLAG]) .map_err(|e| e.to_string())?; parse_pipeline_result(&result.stdout, subcommand, dry_run) @@ -148,7 +148,7 @@ pub fn clean_outputs(cwd: String, dry_run: bool) -> Result Result, String> { let args: Vec<&str> = command.split_whitespace().collect(); - let subcommand = args.first().copied().unwrap_or("execute"); + let subcommand = args.first().copied().unwrap_or("install"); let extra_args: Vec<&str> = args.iter().skip(1).copied().collect(); let result = tnmsc::run_bridge_command(subcommand, Path::new(&cwd), &extra_args) .map_err(|e| e.to_string())?; diff --git a/gui/src-tauri/src/lib.rs b/gui/src-tauri/src/lib.rs index b66648cc..07f9f474 100644 --- a/gui/src-tauri/src/lib.rs +++ b/gui/src-tauri/src/lib.rs @@ -10,7 +10,7 @@ pub fn run() { .plugin(tauri_plugin_shell::init()) .plugin(tauri_plugin_updater::Builder::new().build()) .invoke_handler(tauri::generate_handler![ - commands::execute_pipeline, + commands::install_pipeline, commands::load_config, commands::list_plugins, commands::clean_outputs, diff --git a/gui/src-tauri/src/tray.rs b/gui/src-tauri/src/tray.rs index a2f9031a..c526030a 100644 --- a/gui/src-tauri/src/tray.rs +++ b/gui/src-tauri/src/tray.rs @@ -1,7 +1,7 @@ /// System tray integration for the Memory Sync desktop application. 
/// /// Creates a tray icon with a context menu containing three actions: -/// - **执行同步** (`execute`): Triggers pipeline execution. Currently shows +/// - **Install** (`install`): Triggers pipeline execution. Currently shows /// the main window as a placeholder until full sidecar integration is wired. /// - **打开主窗口** (`show`): Brings the main window to the foreground. /// - **退出** (`quit`): Fully exits the application process. @@ -13,8 +13,8 @@ /// /// - 8.1 — Display tray icon on startup /// - 8.2 — Click tray icon toggles window show/hide -/// - 8.3 — Right-click context menu with execute, show, quit -/// - 8.4 — "执行同步" triggers pipeline execution +/// - 8.3 — Right-click context menu with install, show, quit +/// - 8.4 — "Install" triggers pipeline execution /// - 8.6 — "退出" fully exits the application use tauri::{ Manager, @@ -33,11 +33,11 @@ use tauri::{ /// retrieval, or tray builder registration fails. pub fn create_tray(app: &tauri::App) -> Result { // ── Context menu items ────────────────────────────────────────────── - let execute_item = MenuItem::with_id(app, "execute", "执行同步", true, None::<&str>)?; + let install_item = MenuItem::with_id(app, "install", "Install", true, None::<&str>)?; let show_item = MenuItem::with_id(app, "show", "打开主窗口", true, None::<&str>)?; let quit_item = MenuItem::with_id(app, "quit", "退出", true, None::<&str>)?; - let menu = Menu::with_items(app, &[&execute_item, &show_item, &quit_item])?; + let menu = Menu::with_items(app, &[&install_item, &show_item, &quit_item])?; // ── Build the tray icon ───────────────────────────────────────────── TrayIconBuilder::new() @@ -46,10 +46,10 @@ pub fn create_tray(app: &tauri::App) -> Result { // Handle context-menu item clicks. .on_menu_event(|app, event| { match event.id.as_ref() { - "execute" => { + "install" => { // TODO: Trigger pipeline execution via sidecar once the // full IPC wiring is in place. 
For now, surface the - // main window so the user can initiate execution + // main window so the user can initiate installation // from the GUI. if let Some(window) = app.get_webview_window("main") { let _ = window.show(); diff --git a/gui/src/api/bridge.test.ts b/gui/src/api/bridge.test.ts index cb13a0b1..dc1a28da 100644 --- a/gui/src/api/bridge.test.ts +++ b/gui/src/api/bridge.test.ts @@ -10,8 +10,8 @@ import { invoke } from '@tauri-apps/api/core' import { cleanOutputs, - executePipeline, getAindexStats, + installPipeline, listAindexFiles, listCategoryFiles, listPlugins, @@ -28,7 +28,7 @@ afterEach(() => { vi.restoreAllMocks() }) -describe('executePipeline', () => { +describe('installPipeline', () => { const mockResult: PipelineResult = { success: true, totalFiles: 5, @@ -46,13 +46,13 @@ describe('executePipeline', () => { errors: [], } - it('should invoke execute_pipeline with cwd and dryRun', async () => { + it('should invoke install_pipeline with cwd and dryRun', async () => { mockedInvoke.mockResolvedValue(mockResult) - const result = await executePipeline('/home/user/project', true) + const result = await installPipeline('/home/user/project', true) expect(mockedInvoke).toHaveBeenCalledOnce() - expect(mockedInvoke).toHaveBeenCalledWith('execute_pipeline', { + expect(mockedInvoke).toHaveBeenCalledWith('install_pipeline', { cwd: '/home/user/project', dryRun: true, }) @@ -62,9 +62,9 @@ describe('executePipeline', () => { it('should default dryRun to false', async () => { mockedInvoke.mockResolvedValue(mockResult) - await executePipeline('/workspace') + await installPipeline('/workspace') - expect(mockedInvoke).toHaveBeenCalledWith('execute_pipeline', { + expect(mockedInvoke).toHaveBeenCalledWith('install_pipeline', { cwd: '/workspace', dryRun: false, }) @@ -73,7 +73,7 @@ describe('executePipeline', () => { it('should propagate invoke rejection', async () => { mockedInvoke.mockRejectedValue(new Error('sidecar not found')) - await 
expect(executePipeline('/bad/path')).rejects.toThrow('sidecar not found') + await expect(installPipeline('/bad/path')).rejects.toThrow('sidecar not found') }) }) diff --git a/gui/src/api/bridge.ts b/gui/src/api/bridge.ts index c7333e82..ed606345 100644 --- a/gui/src/api/bridge.ts +++ b/gui/src/api/bridge.ts @@ -24,8 +24,8 @@ export interface PipelineResult { readonly errors: readonly string[] } -export function executePipeline(cwd: string, dryRun = false): Promise { - return invoke('execute_pipeline', { cwd, dryRun }) +export function installPipeline(cwd: string, dryRun = false): Promise { + return invoke('install_pipeline', { cwd, dryRun }) } export function cleanOutputs(cwd: string, dryRun = false): Promise { diff --git a/gui/src/hooks/usePipeline.ts b/gui/src/hooks/usePipeline.ts index 1741c550..ff12fa11 100644 --- a/gui/src/hooks/usePipeline.ts +++ b/gui/src/hooks/usePipeline.ts @@ -1,7 +1,7 @@ import { useCallback, useState } from 'react'; import type { PipelineResult } from '@/api/bridge'; -import { cleanOutputs, executePipeline } from '@/api/bridge'; +import { cleanOutputs, installPipeline } from '@/api/bridge'; export type PipelineStatus = | { readonly kind: 'idle' } @@ -11,7 +11,7 @@ export type PipelineStatus = export interface UsePipelineReturn { readonly status: PipelineStatus - readonly execute: (cwd: string) => Promise + readonly install: (cwd: string) => Promise readonly clean: (cwd: string) => Promise readonly dryRun: (cwd: string) => Promise readonly reset: () => void @@ -20,10 +20,10 @@ export interface UsePipelineReturn { export function usePipeline(): UsePipelineReturn { const [status, setStatus] = useState({ kind: 'idle' }) - const execute = useCallback(async (cwd: string) => { + const install = useCallback(async (cwd: string) => { setStatus({ kind: 'running' }) try { - const result = await executePipeline(cwd, false) + const result = await installPipeline(cwd, false) setStatus({ kind: 'completed', result }) } catch (err) { setStatus({ kind: 
'error', message: err instanceof Error ? err.message : String(err) }) @@ -43,7 +43,7 @@ const dryRun = useCallback(async (cwd: string) => { setStatus({ kind: 'running' }) try { - const result = await executePipeline(cwd, true) + const result = await installPipeline(cwd, true) setStatus({ kind: 'completed', result }) } catch (err) { setStatus({ kind: 'error', message: err instanceof Error ? err.message : String(err) }) @@ -54,5 +54,5 @@ setStatus({ kind: 'idle' }) }, []) - return { status, execute, clean, dryRun, reset } + return { status, install, clean, dryRun, reset } } diff --git a/gui/src/i18n/en-US.json b/gui/src/i18n/en-US.json index f1d561ff..40c558d0 100644 --- a/gui/src/i18n/en-US.json +++ b/gui/src/i18n/en-US.json @@ -7,7 +7,7 @@ "nav.logs": "Logs", "nav.files": "Files", "nav.settings": "Settings", - "pipeline.execute": "Execute Sync", + "pipeline.install": "Install", "pipeline.clean": "Clean Outputs", "pipeline.dryRun": "Dry Run", "pipeline.status.idle": "Idle", @@ -75,7 +75,7 @@ "common.confirm": "Confirm", "common.error": "Error", "common.success": "Success", - "tray.execute": "Execute Sync", + "tray.install": "Install", "tray.show": "Open Main Window", "tray.quit": "Quit", "dashboard.stats.title": "Aindex Statistics", diff --git a/gui/src/i18n/zh-CN.json b/gui/src/i18n/zh-CN.json index 9f10b79e..14f46eee 100644 --- a/gui/src/i18n/zh-CN.json +++ b/gui/src/i18n/zh-CN.json @@ -7,7 +7,7 @@ "nav.logs": "日志查看", "nav.files": "文件查看", "nav.settings": "设置", - "pipeline.execute": "执行同步", + "pipeline.install": "安装", "pipeline.clean": "清理输出", "pipeline.dryRun": "预览模式", "pipeline.status.idle": "空闲", @@ -75,7 +75,7 @@ "common.confirm": "确认", "common.error": "错误", "common.success": "成功", - "tray.execute": "执行同步", + "tray.install": "安装", "tray.show": "打开主窗口", "tray.quit": "退出", "dashboard.stats.title": "Aindex 统计", diff --git
a/gui/src/pages/DashboardPage.tsx b/gui/src/pages/DashboardPage.tsx index 422d4d5a..b5c4fd8c 100644 --- a/gui/src/pages/DashboardPage.tsx +++ b/gui/src/pages/DashboardPage.tsx @@ -58,7 +58,7 @@ const DashboardPage: FC = () => { const { t } = useI18n() const { resolved } = useTheme() const isDark = resolved === 'dark' - const { status, execute, clean, dryRun, reset } = usePipeline() + const { status, install, clean, dryRun, reset } = usePipeline() const [stats, setStats] = useState(null) const [statsLoading, setStatsLoading] = useState(false) @@ -142,9 +142,9 @@ const DashboardPage: FC = () => { {/* Quick Actions */}
-