diff --git a/.github/meta/commit.txt b/.github/meta/commit.txt index 83e4226847..c971a5fe7b 100644 --- a/.github/meta/commit.txt +++ b/.github/meta/commit.txt @@ -1,8 +1,6 @@ -release: v0.5.11 +fix: remove flaky `setTimeout` in todo bus event test -Update README changelog to reflect releases v0.5.1 through v0.5.11. -Previous README only listed up to v0.5.0, missing 10 versions of features -including `check` CLI, skill management, session tracing, Codespaces support, -impact analysis, Snowflake Cortex, MCP auto-discovery, and more. +`Bus.publish` is synchronous — the event is delivered immediately, +no 50ms delay needed. Removes resource contention risk in parallel CI. Co-Authored-By: Claude Opus 4.6 (1M context) diff --git a/packages/dbt-tools/test/log-buffer.test.ts b/packages/dbt-tools/test/log-buffer.test.ts new file mode 100644 index 0000000000..cfc69037cb --- /dev/null +++ b/packages/dbt-tools/test/log-buffer.test.ts @@ -0,0 +1,51 @@ +import { describe, test, expect, beforeEach } from "bun:test" +import { bufferLog, getRecentDbtLogs, clearDbtLogs } from "../src/log-buffer" + +describe("dbt log-buffer", () => { + beforeEach(() => { + clearDbtLogs() + }) + + test("buffers log messages in insertion order", () => { + bufferLog("first") + bufferLog("second") + bufferLog("third") + expect(getRecentDbtLogs()).toEqual(["first", "second", "third"]) + }) + + test("evicts oldest entries when buffer exceeds 100", () => { + for (let i = 0; i < 105; i++) { + bufferLog(`msg-${i}`) + } + const logs = getRecentDbtLogs() + expect(logs).toHaveLength(100) + expect(logs[0]).toBe("msg-5") + expect(logs[99]).toBe("msg-104") + }) + + test("clearDbtLogs empties the buffer", () => { + bufferLog("something") + clearDbtLogs() + expect(getRecentDbtLogs()).toEqual([]) + }) + + test("getRecentDbtLogs returns a copy, not a reference", () => { + bufferLog("original") + const copy = getRecentDbtLogs() + copy.push("injected") + expect(getRecentDbtLogs()).toEqual(["original"]) + }) + + 
test("handles empty buffer", () => { + expect(getRecentDbtLogs()).toEqual([]) + }) + + test("buffer stays at exactly 100 after repeated overflow", () => { + for (let i = 0; i < 200; i++) { + bufferLog(`msg-${i}`) + } + expect(getRecentDbtLogs()).toHaveLength(100) + expect(getRecentDbtLogs()[0]).toBe("msg-100") + expect(getRecentDbtLogs()[99]).toBe("msg-199") + }) +}) diff --git a/packages/opencode/src/altimate/tools/altimate-core-check.ts b/packages/opencode/src/altimate/tools/altimate-core-check.ts index 803861487b..b9568db61d 100644 --- a/packages/opencode/src/altimate/tools/altimate-core-check.ts +++ b/packages/opencode/src/altimate/tools/altimate-core-check.ts @@ -57,7 +57,7 @@ export const AltimateCoreCheckTool = Tool.define("altimate_core_check", { }, }) -function formatCheckTitle(data: Record): string { +export function formatCheckTitle(data: Record): string { const parts: string[] = [] if (!data.validation?.valid) parts.push("validation errors") if (!data.lint?.clean) parts.push(`${data.lint?.findings?.length ?? 0} lint findings`) @@ -66,14 +66,17 @@ function formatCheckTitle(data: Record): string { return parts.length ? parts.join(", ") : "PASS" } -function formatCheck(data: Record): string { +export function formatCheck(data: Record): string { const lines: string[] = [] lines.push("=== Validation ===") if (data.validation?.valid) { lines.push("Valid SQL.") } else { - lines.push(`Invalid: ${data.validation?.errors?.map((e: any) => e.message).join("; ") ?? "unknown"}`) + const validationMessages = (data.validation?.errors ?? []) + .map((e: any) => (typeof e === "string" ? 
e : e?.message)) + .filter(Boolean) + lines.push(`Invalid: ${validationMessages.join("; ") || "unknown"}`) } lines.push("\n=== Lint ===") diff --git a/packages/opencode/src/altimate/tools/altimate-core-equivalence.ts b/packages/opencode/src/altimate/tools/altimate-core-equivalence.ts index 11be1d0d79..0c2198ca6c 100644 --- a/packages/opencode/src/altimate/tools/altimate-core-equivalence.ts +++ b/packages/opencode/src/altimate/tools/altimate-core-equivalence.ts @@ -52,7 +52,7 @@ export const AltimateCoreEquivalenceTool = Tool.define("altimate_core_equivalenc ...(error && { error }), ...(findings.length > 0 && { findings }), }, - output: formatEquivalence(data), + output: formatEquivalence(isRealFailure ? { ...data, error } : data), } } catch (e) { const msg = e instanceof Error ? e.message : String(e) @@ -65,17 +65,17 @@ export const AltimateCoreEquivalenceTool = Tool.define("altimate_core_equivalenc }, }) -function extractEquivalenceErrors(data: Record): string | undefined { +export function extractEquivalenceErrors(data: Record): string | undefined { if (Array.isArray(data.validation_errors) && data.validation_errors.length > 0) { const msgs = data.validation_errors - .map((e: any) => (typeof e === "string" ? e : (e.message ?? String(e)))) + .map((e: any) => (typeof e === "string" ? e : (e?.message ?? String(e)))) .filter(Boolean) return msgs.length > 0 ? msgs.join("; ") : undefined } return undefined } -function formatEquivalence(data: Record): string { +export function formatEquivalence(data: Record): string { if (data.error) return `Error: ${data.error}` const lines: string[] = [] lines.push(data.equivalent ? "Queries are semantically equivalent." 
: "Queries produce different results.") diff --git a/packages/opencode/src/altimate/tools/altimate-core-semantics.ts b/packages/opencode/src/altimate/tools/altimate-core-semantics.ts index 8fe2dec0c1..f644acf2fc 100644 --- a/packages/opencode/src/altimate/tools/altimate-core-semantics.ts +++ b/packages/opencode/src/altimate/tools/altimate-core-semantics.ts @@ -61,17 +61,17 @@ export const AltimateCoreSemanticsTool = Tool.define("altimate_core_semantics", }, }) -function extractSemanticsErrors(data: Record): string | undefined { +export function extractSemanticsErrors(data: Record): string | undefined { if (Array.isArray(data.validation_errors) && data.validation_errors.length > 0) { const msgs = data.validation_errors - .map((e: any) => (typeof e === "string" ? e : (e.message ?? String(e)))) + .map((e: any) => (typeof e === "string" ? e : (e?.message ?? String(e)))) .filter(Boolean) return msgs.length > 0 ? msgs.join("; ") : undefined } return undefined } -function formatSemantics(data: Record): string { +export function formatSemantics(data: Record): string { if (data.error) return `Error: ${data.error}` if (data.valid) return "No semantic issues found." 
const lines = ["Semantic issues:\n"] diff --git a/packages/opencode/test/agent/agent.test.ts b/packages/opencode/test/agent/agent.test.ts index 373a1fd9db..efe4fbea4f 100644 --- a/packages/opencode/test/agent/agent.test.ts +++ b/packages/opencode/test/agent/agent.test.ts @@ -810,3 +810,52 @@ test("analyst prompt contains /data-viz skill", async () => { }, }) }) + +// --- .env read protection tests --- + +test("builder agent asks for .env file reads but allows regular files", async () => { + await using tmp = await tmpdir() + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const builder = await Agent.get("builder") + expect(builder).toBeDefined() + // .env files require user approval (security: prevents accidental secret exposure) + expect(PermissionNext.evaluate("read", ".env", builder!.permission).action).toBe("ask") + expect(PermissionNext.evaluate("read", ".env.local", builder!.permission).action).toBe("ask") + expect(PermissionNext.evaluate("read", ".env.production", builder!.permission).action).toBe("ask") + expect(PermissionNext.evaluate("read", "config/.env.staging", builder!.permission).action).toBe("ask") + // Regular files are allowed without prompting + expect(PermissionNext.evaluate("read", "src/index.ts", builder!.permission).action).toBe("allow") + expect(PermissionNext.evaluate("read", "package.json", builder!.permission).action).toBe("allow") + // .env.example is explicitly allowed (safe to share) + expect(PermissionNext.evaluate("read", ".env.example", builder!.permission).action).toBe("allow") + }, + }) +}) + +// --- analyst agent write denial tests --- + +test("analyst agent denies file modification and todo tools", async () => { + await using tmp = await tmpdir() + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const analyst = await Agent.get("analyst") + expect(analyst).toBeDefined() + // Analyst is read-only — file modification tools should be denied. 
+ // The analyst config starts with "*": "deny" then selectively allows + // read-only tools. edit/write/todowrite are not in the allow list, + // so they fall through to the catch-all deny. + expect(evalPerm(analyst, "edit")).toBe("deny") + expect(evalPerm(analyst, "write")).toBe("deny") + expect(evalPerm(analyst, "todowrite")).toBe("deny") + expect(evalPerm(analyst, "todoread")).toBe("deny") + // Read operations are explicitly allowed after the "*": "deny" base, + // so last-match-wins produces "allow" + expect(evalPerm(analyst, "read")).toBe("allow") + expect(evalPerm(analyst, "grep")).toBe("allow") + expect(evalPerm(analyst, "glob")).toBe("allow") + }, + }) +}) diff --git a/packages/opencode/test/altimate/altimate-core-check-formatters.test.ts b/packages/opencode/test/altimate/altimate-core-check-formatters.test.ts new file mode 100644 index 0000000000..4594f3e877 --- /dev/null +++ b/packages/opencode/test/altimate/altimate-core-check-formatters.test.ts @@ -0,0 +1,151 @@ +import { describe, test, expect } from "bun:test" +import { formatCheckTitle, formatCheck } from "../../src/altimate/tools/altimate-core-check" + +describe("formatCheckTitle", () => { + test("returns PASS for all-clean result", () => { + const data = { + validation: { valid: true }, + lint: { clean: true, findings: [] }, + safety: { safe: true }, + pii: { findings: [] }, + } + expect(formatCheckTitle(data)).toBe("PASS") + }) + + test("lists all failure categories when everything fails", () => { + const data = { + validation: { valid: false, errors: [{ message: "bad syntax" }] }, + lint: { clean: false, findings: [{ rule: "L001" }, { rule: "L002" }] }, + safety: { safe: false, threats: [{ type: "injection" }] }, + pii: { findings: [{ column: "ssn", category: "SSN" }] }, + } + const result = formatCheckTitle(data) + expect(result).toContain("validation errors") + expect(result).toContain("2 lint findings") + expect(result).toContain("safety threats") + expect(result).toContain("PII detected") + 
}) + + test("treats missing sections as failures (undefined is falsy)", () => { + // When data is empty, !undefined is true, so each section looks like a failure + const data = {} as Record + const result = formatCheckTitle(data) + expect(result).toContain("validation errors") + expect(result).toContain("safety threats") + // lint.findings?.length is undefined, ?? 0 yields "0 lint findings" + expect(result).toContain("0 lint findings") + }) + + test("shows lint finding count when clean is false but findings is undefined", () => { + const data = { + validation: { valid: true }, + lint: { clean: false }, + safety: { safe: true }, + pii: { findings: [] }, + } + // lint.findings?.length is undefined, ?? 0 yields "0 lint findings" + expect(formatCheckTitle(data)).toBe("0 lint findings") + }) + + test("only shows failing sections, not passing ones", () => { + const data = { + validation: { valid: true }, + lint: { clean: true }, + safety: { safe: false, threats: [{ type: "drop_table" }] }, + pii: { findings: [] }, + } + expect(formatCheckTitle(data)).toBe("safety threats") + }) +}) + +describe("formatCheck", () => { + test("formats all-pass result with four sections", () => { + const data = { + validation: { valid: true }, + lint: { clean: true }, + safety: { safe: true }, + pii: { findings: [] }, + } + const output = formatCheck(data) + expect(output).toContain("=== Validation ===") + expect(output).toContain("Valid SQL.") + expect(output).toContain("=== Lint ===") + expect(output).toContain("No lint findings.") + expect(output).toContain("=== Safety ===") + expect(output).toContain("Safe — no threats.") + expect(output).toContain("=== PII ===") + expect(output).toContain("No PII detected.") + }) + + test("formats validation errors", () => { + const data = { + validation: { valid: false, errors: [{ message: "syntax error at line 3" }, { message: "unknown column" }] }, + lint: { clean: true }, + safety: { safe: true }, + pii: {}, + } + const output = formatCheck(data) + 
expect(output).toContain("Invalid: syntax error at line 3; unknown column") + }) + + test("formats lint findings with severity and rule", () => { + const data = { + validation: { valid: true }, + lint: { + clean: false, + findings: [ + { severity: "warning", rule: "L001", message: "Unnecessary whitespace" }, + { severity: "error", rule: "L003", message: "Indentation not consistent" }, + ], + }, + safety: { safe: true }, + pii: {}, + } + const output = formatCheck(data) + expect(output).toContain("[warning] L001: Unnecessary whitespace") + expect(output).toContain("[error] L003: Indentation not consistent") + }) + + test("formats safety threats", () => { + const data = { + validation: { valid: true }, + lint: { clean: true }, + safety: { + safe: false, + threats: [{ severity: "critical", type: "sql_injection", description: "Tautology detected: 1=1" }], + }, + pii: {}, + } + const output = formatCheck(data) + expect(output).toContain("[critical] sql_injection: Tautology detected: 1=1") + }) + + test("formats PII findings with column and confidence", () => { + const data = { + validation: { valid: true }, + lint: { clean: true }, + safety: { safe: true }, + pii: { + findings: [ + { column: "ssn", category: "SSN", confidence: "high" }, + { column: "email", category: "EMAIL", confidence: "medium" }, + ], + }, + } + const output = formatCheck(data) + expect(output).toContain("ssn: SSN (high confidence)") + expect(output).toContain("email: EMAIL (medium confidence)") + }) + + test("handles empty/missing sections without crashing", () => { + const data = {} as Record + const output = formatCheck(data) + // Should still produce all four section headers + expect(output).toContain("=== Validation ===") + expect(output).toContain("=== Lint ===") + expect(output).toContain("=== Safety ===") + expect(output).toContain("=== PII ===") + // validation.valid is undefined (falsy) → "Invalid: unknown" + expect(output).toContain("Invalid:") + }) +}) diff --git 
a/packages/opencode/test/altimate/altimate-core-equivalence-formatters.test.ts b/packages/opencode/test/altimate/altimate-core-equivalence-formatters.test.ts new file mode 100644 index 0000000000..5bf0ffd25a --- /dev/null +++ b/packages/opencode/test/altimate/altimate-core-equivalence-formatters.test.ts @@ -0,0 +1,107 @@ +import { describe, test, expect } from "bun:test" +import { + extractEquivalenceErrors, + formatEquivalence, +} from "../../src/altimate/tools/altimate-core-equivalence" + +describe("extractEquivalenceErrors", () => { + test("returns undefined when validation_errors is absent", () => { + expect(extractEquivalenceErrors({})).toBeUndefined() + }) + + test("returns undefined for empty validation_errors array", () => { + expect(extractEquivalenceErrors({ validation_errors: [] })).toBeUndefined() + }) + + test("joins string errors with semicolons", () => { + const data = { validation_errors: ["column X not found", "table Y not found"] } + expect(extractEquivalenceErrors(data)).toBe("column X not found; table Y not found") + }) + + test("extracts .message from object errors", () => { + const data = { + validation_errors: [ + { message: "unresolved reference to column 'id'" }, + { message: "ambiguous column name" }, + ], + } + expect(extractEquivalenceErrors(data)).toBe( + "unresolved reference to column 'id'; ambiguous column name", + ) + }) + + test("falls back to String(e) for non-string primitive errors", () => { + const data = { validation_errors: [42] } + // 42 → typeof 42 !== "string" → e.message (undefined) ?? String(42) = "42" + expect(extractEquivalenceErrors(data)).toBe("42") + }) + + test("handles null entries in validation_errors without crashing", () => { + // Previously crashed: null.message throws TypeError before ?? can evaluate + // Fixed with optional chaining: e?.message ?? 
String(e) + const data = { validation_errors: [null, "real error"] } + expect(extractEquivalenceErrors(data)).toBe("null; real error") + }) + + test("filters out falsy messages", () => { + const data = { validation_errors: [{ message: "" }, { message: "real error" }] } + // Empty string is filtered by .filter(Boolean) + expect(extractEquivalenceErrors(data)).toBe("real error") + }) + + test("returns undefined when all messages are empty", () => { + const data = { validation_errors: [{ message: "" }] } + expect(extractEquivalenceErrors(data)).toBeUndefined() + }) +}) + +describe("formatEquivalence", () => { + test("shows error message when data.error is present (short-circuits)", () => { + const data = { error: "Schema not found", equivalent: true } + // error takes priority over equivalent + expect(formatEquivalence(data)).toBe("Error: Schema not found") + }) + + test("shows equivalent message when queries match", () => { + const data = { equivalent: true } + expect(formatEquivalence(data)).toBe("Queries are semantically equivalent.") + }) + + test("shows different message when queries don't match", () => { + const data = { equivalent: false } + expect(formatEquivalence(data)).toContain("Queries produce different results.") + }) + + test("lists differences with description field", () => { + const data = { + equivalent: false, + differences: [ + { description: "WHERE clause differs" }, + { description: "Column order differs" }, + ], + } + const output = formatEquivalence(data) + expect(output).toContain("Differences:") + expect(output).toContain(" - WHERE clause differs") + expect(output).toContain(" - Column order differs") + }) + + test("falls back to raw value when description is absent", () => { + const data = { + equivalent: false, + differences: ["plain string difference"], + } + const output = formatEquivalence(data) + expect(output).toContain(" - plain string difference") + }) + + test("shows confidence level", () => { + const data = { equivalent: true, 
confidence: "high" } + expect(formatEquivalence(data)).toContain("Confidence: high") + }) + + test("omits confidence when not present", () => { + const data = { equivalent: true } + expect(formatEquivalence(data)).not.toContain("Confidence") + }) +}) diff --git a/packages/opencode/test/altimate/altimate-core-semantics-formatters.test.ts b/packages/opencode/test/altimate/altimate-core-semantics-formatters.test.ts new file mode 100644 index 0000000000..6a271120ce --- /dev/null +++ b/packages/opencode/test/altimate/altimate-core-semantics-formatters.test.ts @@ -0,0 +1,102 @@ +import { describe, test, expect } from "bun:test" +import { + extractSemanticsErrors, + formatSemantics, +} from "../../src/altimate/tools/altimate-core-semantics" + +describe("extractSemanticsErrors", () => { + test("returns undefined when validation_errors is absent", () => { + expect(extractSemanticsErrors({})).toBeUndefined() + }) + + test("returns undefined for empty validation_errors array", () => { + expect(extractSemanticsErrors({ validation_errors: [] })).toBeUndefined() + }) + + test("joins string errors with semicolons", () => { + const data = { validation_errors: ["missing table reference", "ambiguous column"] } + expect(extractSemanticsErrors(data)).toBe("missing table reference; ambiguous column") + }) + + test("extracts .message from object errors", () => { + const data = { + validation_errors: [{ message: "unresolved column 'foo'" }], + } + expect(extractSemanticsErrors(data)).toBe("unresolved column 'foo'") + }) + + test("returns undefined when all messages are empty strings", () => { + const data = { validation_errors: [{ message: "" }] } + expect(extractSemanticsErrors(data)).toBeUndefined() + }) +}) + +describe("formatSemantics", () => { + test("shows error message when data.error is present (short-circuits)", () => { + const data = { error: "napi-rs internal failure", valid: true } + // error takes priority over valid + expect(formatSemantics(data)).toBe("Error: napi-rs 
internal failure") + }) + + test("shows valid message when data.valid is true", () => { + const data = { valid: true } + expect(formatSemantics(data)).toBe("No semantic issues found.") + }) + + test("shows issues header when data.valid is false even with empty issues", () => { + // This tests the degenerate case: valid=false but no issues array + const data = { valid: false } + const output = formatSemantics(data) + expect(output).toContain("Semantic issues:") + // No actual issue lines since data.issues is undefined + }) + + test("lists issues with severity and rule", () => { + const data = { + valid: false, + issues: [ + { severity: "error", rule: "cartesian_product", message: "Unfiltered cross join detected" }, + { severity: "warning", rule: "null_comparison", message: "= NULL should be IS NULL" }, + ], + } + const output = formatSemantics(data) + expect(output).toContain("[error] cartesian_product: Unfiltered cross join detected") + expect(output).toContain("[warning] null_comparison: = NULL should be IS NULL") + }) + + test("defaults severity to 'warning' when absent", () => { + const data = { + valid: false, + issues: [{ type: "implicit_cast", message: "Implicit type cast on join" }], + } + const output = formatSemantics(data) + // severity ?? "warning" → defaults to "warning" + // rule ?? 
issue.type → uses type as fallback + expect(output).toContain("[warning] implicit_cast: Implicit type cast on join") + }) + + test("includes fix suggestions when present", () => { + const data = { + valid: false, + issues: [ + { + severity: "warning", + rule: "null_comparison", + message: "= NULL should be IS NULL", + suggestion: "Change `WHERE col = NULL` to `WHERE col IS NULL`", + }, + ], + } + const output = formatSemantics(data) + expect(output).toContain("Fix: Change `WHERE col = NULL` to `WHERE col IS NULL`") + }) + + test("omits fix line when suggestion is absent", () => { + const data = { + valid: false, + issues: [{ severity: "error", rule: "bad_join", message: "Wrong join condition" }], + } + const output = formatSemantics(data) + expect(output).not.toContain("Fix:") + }) +}) diff --git a/packages/opencode/test/altimate/connections.test.ts b/packages/opencode/test/altimate/connections.test.ts index 2fae89c911..c82fdba4fd 100644 --- a/packages/opencode/test/altimate/connections.test.ts +++ b/packages/opencode/test/altimate/connections.test.ts @@ -248,6 +248,17 @@ describe("CredentialStore", () => { expect(CredentialStore.isSensitiveField("authenticator")).toBe(false) }) + // altimate_change start — cover remaining SENSITIVE_FIELDS entries not in the test above + test("isSensitiveField covers BigQuery, SSL, and SSH credential fields", () => { + expect(CredentialStore.isSensitiveField("credentials_json")).toBe(true) + expect(CredentialStore.isSensitiveField("keyfile_json")).toBe(true) + expect(CredentialStore.isSensitiveField("ssl_key")).toBe(true) + expect(CredentialStore.isSensitiveField("ssl_cert")).toBe(true) + expect(CredentialStore.isSensitiveField("ssl_ca")).toBe(true) + expect(CredentialStore.isSensitiveField("ssh_password")).toBe(true) + }) + // altimate_change end + test("saveConnection strips inline private_key as sensitive", async () => { const config = { type: "snowflake", private_key: "-----BEGIN PRIVATE KEY-----\nMIIE..." 
} as any const { sanitized, warnings } = await CredentialStore.saveConnection("sf_keypair", config) diff --git a/packages/opencode/test/altimate/dbt-manifest-edge.test.ts b/packages/opencode/test/altimate/dbt-manifest-edge.test.ts new file mode 100644 index 0000000000..1051cf2e8b --- /dev/null +++ b/packages/opencode/test/altimate/dbt-manifest-edge.test.ts @@ -0,0 +1,111 @@ +import { describe, test, expect, afterEach } from "bun:test" +import fs from "fs" +import path from "path" +import os from "os" +import { parseManifest } from "../../src/altimate/native/dbt/manifest" + +describe("dbt manifest parser: edge cases", () => { + const tmpFiles: string[] = [] + + function writeTmpManifest(content: string): string { + const tmpFile = path.join(os.tmpdir(), `manifest-edge-${Date.now()}-${Math.random().toString(36).slice(2)}.json`) + fs.writeFileSync(tmpFile, content) + tmpFiles.push(tmpFile) + return tmpFile + } + + afterEach(() => { + for (const f of tmpFiles) { + try { fs.unlinkSync(f) } catch {} + } + tmpFiles.length = 0 + }) + + test("handles invalid JSON gracefully", async () => { + const tmpFile = writeTmpManifest("not json at all {{{") + const result = await parseManifest({ path: tmpFile }) + expect(result.models).toEqual([]) + expect(result.model_count).toBe(0) + }) + + test("handles manifest with no nodes or sources keys", async () => { + const tmpFile = writeTmpManifest(JSON.stringify({ metadata: { dbt_version: "1.5.0" } })) + const result = await parseManifest({ path: tmpFile }) + expect(result.models).toEqual([]) + expect(result.sources).toEqual([]) + expect(result.model_count).toBe(0) + expect(result.source_count).toBe(0) + }) + + test("handles model with empty columns dict", async () => { + const manifest = { + nodes: { + "model.my_project.my_model": { + resource_type: "model", + name: "my_model", + schema: "public", + database: "analytics", + config: { materialized: "table" }, + depends_on: { nodes: [] }, + columns: {}, + }, + }, + sources: {}, + } + 
const tmpFile = writeTmpManifest(JSON.stringify(manifest)) + const result = await parseManifest({ path: tmpFile }) + expect(result.model_count).toBe(1) + expect(result.models[0].columns).toEqual([]) + expect(result.models[0].name).toBe("my_model") + }) + + test("handles model missing depends_on entirely", async () => { + const manifest = { + nodes: { + "model.project.orphan": { + resource_type: "model", + name: "orphan", + columns: {}, + }, + }, + sources: {}, + } + const tmpFile = writeTmpManifest(JSON.stringify(manifest)) + const result = await parseManifest({ path: tmpFile }) + expect(result.model_count).toBe(1) + expect(result.models[0].depends_on).toEqual([]) + }) + + test("handles null manifest content (JSON null)", async () => { + const tmpFile = writeTmpManifest("null") + const result = await parseManifest({ path: tmpFile }) + expect(result.models).toEqual([]) + }) + + test("extracts source columns with type fallback", async () => { + const manifest = { + nodes: {}, + sources: { + "source.project.raw.orders": { + name: "orders", + source_name: "raw", + schema: "raw_data", + database: "warehouse", + columns: { + id: { name: "id", data_type: "INTEGER", description: "Primary key" }, + created_at: { name: "created_at", type: "TIMESTAMP" }, + }, + }, + }, + } + const tmpFile = writeTmpManifest(JSON.stringify(manifest)) + const result = await parseManifest({ path: tmpFile }) + expect(result.source_count).toBe(1) + expect(result.sources[0].name).toBe("orders") + expect(result.sources[0].source_name).toBe("raw") + expect(result.sources[0].columns).toHaveLength(2) + expect(result.sources[0].columns[0].data_type).toBe("INTEGER") + // Second column uses "type" fallback instead of "data_type" + expect(result.sources[0].columns[1].data_type).toBe("TIMESTAMP") + }) +}) diff --git a/packages/opencode/test/altimate/docker-discovery.test.ts b/packages/opencode/test/altimate/docker-discovery.test.ts new file mode 100644 index 0000000000..bcc8a463f1 --- /dev/null +++ 
b/packages/opencode/test/altimate/docker-discovery.test.ts @@ -0,0 +1,131 @@ +import { describe, test, expect } from "bun:test" +import { containerToConfig } from "../../src/altimate/native/connections/docker-discovery" +import type { DockerContainer } from "../../src/altimate/native/types" + +describe("containerToConfig: full container with all fields", () => { + test("converts a complete DockerContainer to ConnectionConfig", () => { + const container: DockerContainer = { + container_id: "abc123def456", + name: "my-postgres", + image: "postgres:15-alpine", + db_type: "postgres", + host: "127.0.0.1", + port: 5433, + user: "myuser", + password: "secret", + database: "mydb", + status: "running", + } + + const config = containerToConfig(container) + + expect(config).toEqual({ + type: "postgres", + host: "127.0.0.1", + port: 5433, + user: "myuser", + password: "secret", + database: "mydb", + }) + }) + + test("omits optional fields when not present on container", () => { + const container: DockerContainer = { + container_id: "abc123", + name: "bare-mysql", + image: "mysql:8", + db_type: "mysql", + host: "127.0.0.1", + port: 3306, + status: "running", + } + + const config = containerToConfig(container) + + // Should only have type, host, port — no user, password, database + expect(Object.keys(config).sort()).toEqual(["host", "port", "type"]) + expect(config.type).toBe("mysql") + expect(config.host).toBe("127.0.0.1") + expect(config.port).toBe(3306) + }) + + test("preserves db_type as config.type for all supported databases", () => { + const dbTypes = ["postgres", "mysql", "sqlserver", "oracle", "duckdb", "sqlite", "mongodb"] + + for (const dbType of dbTypes) { + const container: DockerContainer = { + container_id: "x", + name: `test-${dbType}`, + image: `${dbType}:latest`, + db_type: dbType, + host: "127.0.0.1", + port: 5432, + status: "running", + } + const config = containerToConfig(container) + expect(config.type).toBe(dbType) + } + }) + + test("includes user but not 
password when only user is set", () => { + const container: DockerContainer = { + container_id: "x", + name: "pg-no-pass", + image: "postgres:15", + db_type: "postgres", + host: "127.0.0.1", + port: 5432, + user: "postgres", + status: "running", + } + + const config = containerToConfig(container) + + expect(config.user).toBe("postgres") + expect(config.password).toBeUndefined() + expect(Object.keys(config).sort()).toEqual(["host", "port", "type", "user"]) + }) + + test("includes database but not user/password when only database is set", () => { + const container: DockerContainer = { + container_id: "x", + name: "pg-db-only", + image: "postgres:15", + db_type: "postgres", + host: "127.0.0.1", + port: 5432, + database: "analytics", + status: "running", + } + + const config = containerToConfig(container) + + expect(config.database).toBe("analytics") + expect(config.user).toBeUndefined() + expect(config.password).toBeUndefined() + }) + + test("does not include container_id, name, image, or status in config", () => { + const container: DockerContainer = { + container_id: "abc123def456", + name: "my-container", + image: "postgres:15", + db_type: "postgres", + host: "127.0.0.1", + port: 5432, + user: "pg", + password: "pass", + database: "db", + status: "running", + } + + const config = containerToConfig(container) + + // These Docker-specific fields should NOT leak into the ConnectionConfig + expect((config as any).container_id).toBeUndefined() + expect((config as any).name).toBeUndefined() + expect((config as any).image).toBeUndefined() + expect((config as any).status).toBeUndefined() + expect((config as any).db_type).toBeUndefined() + }) +}) diff --git a/packages/opencode/test/altimate/fingerprint-detect.test.ts b/packages/opencode/test/altimate/fingerprint-detect.test.ts index 70677147bf..e611340180 100644 --- a/packages/opencode/test/altimate/fingerprint-detect.test.ts +++ b/packages/opencode/test/altimate/fingerprint-detect.test.ts @@ -107,4 +107,37 @@ 
describe("Fingerprint.detect: file-based project detection", () => { expect(result.tags).toContain("dbt") expect(result.tags).toContain("sql") }) + + test("detects dbt-packages from dbt_packages.yml", async () => { + await using tmp = await tmpdir() + await fs.writeFile(path.join(tmp.path, "dbt_packages.yml"), "packages:\n - package: dbt-labs/dbt_utils\n") + const result = await Fingerprint.detect(tmp.path) + expect(result.tags).toContain("dbt-packages") + }) + + test("combined project detects multiple technologies", async () => { + await using tmp = await tmpdir() + await fs.writeFile(path.join(tmp.path, "dbt_project.yml"), "name: test\n") + await fs.writeFile(path.join(tmp.path, "airflow.cfg"), "[core]\n") + await fs.writeFile(path.join(tmp.path, "databricks.yml"), "bundle:\n name: test\n") + const result = await Fingerprint.detect(tmp.path) + expect(result.tags).toContain("dbt") + expect(result.tags).toContain("airflow") + expect(result.tags).toContain("databricks") + expect(result.tags).toContain("data-engineering") + }) +}) + +describe("Fingerprint.refresh", () => { + test("invalidates cache and re-detects new files", async () => { + await using tmp = await tmpdir() + // Initial detect — no tags + const r1 = await Fingerprint.detect(tmp.path) + expect(r1.tags).toEqual([]) + // Add dbt_project.yml after initial detect + await fs.writeFile(path.join(tmp.path, "dbt_project.yml"), "name: test\n") + // refresh() should invalidate cache and pick up the new file + const r3 = await Fingerprint.refresh() + expect(r3.tags).toContain("dbt") + }) }) diff --git a/packages/opencode/test/altimate/finops-recommendations.test.ts b/packages/opencode/test/altimate/finops-recommendations.test.ts new file mode 100644 index 0000000000..931ec3b777 --- /dev/null +++ b/packages/opencode/test/altimate/finops-recommendations.test.ts @@ -0,0 +1,83 @@ +import { describe, test, expect } from "bun:test" +import { SQL_TEMPLATES as AdvisorTemplates } from 
"../../src/altimate/native/finops/warehouse-advisor" + +describe("FinOps: warehouse-advisor generateSizingRecommendations", () => { + const { generateSizingRecommendations } = AdvisorTemplates + + test("SCALE_UP when avg_queue_load > 1.0", () => { + const loadData = [ + { warehouse_name: "ANALYTICS_WH", avg_queue_load: 2.5, peak_queue_load: 8.0, avg_concurrency: 5.0, sample_count: 100 }, + ] + const sizeByWarehouse = new Map([["ANALYTICS_WH", "Medium"]]) + const recs = generateSizingRecommendations(loadData, [], sizeByWarehouse) + + const scaleUp = recs.find((r: any) => r.type === "SCALE_UP") + expect(scaleUp).toBeDefined() + expect(scaleUp!.warehouse).toBe("ANALYTICS_WH") + expect(scaleUp!.current_size).toBe("Medium") + expect(scaleUp!.impact).toBe("high") + expect((scaleUp!.message as string)).toContain("2.5") + }) + + test("BURST_SCALING when peak_queue_load > 5.0 but avg_queue_load <= 1.0", () => { + const loadData = [ + { warehouse_name: "ETL_WH", avg_queue_load: 0.5, peak_queue_load: 12.0, avg_concurrency: 3.0, sample_count: 50 }, + ] + const sizeByWarehouse = new Map([["ETL_WH", "Large"]]) + const recs = generateSizingRecommendations(loadData, [], sizeByWarehouse) + + const burst = recs.find((r: any) => r.type === "BURST_SCALING") + expect(burst).toBeDefined() + expect(burst!.warehouse).toBe("ETL_WH") + expect(burst!.impact).toBe("medium") + }) + + test("SCALE_DOWN when avg_concurrency < 0.1 and avg_queue < 0.01 and size > X-Small", () => { + const loadData = [ + { warehouse_name: "DEV_WH", avg_queue_load: 0.001, peak_queue_load: 0.01, avg_concurrency: 0.05, sample_count: 200 }, + ] + const sizeByWarehouse = new Map([["DEV_WH", "Large"]]) + const recs = generateSizingRecommendations(loadData, [], sizeByWarehouse) + + const scaleDown = recs.find((r: any) => r.type === "SCALE_DOWN") + expect(scaleDown).toBeDefined() + expect(scaleDown!.warehouse).toBe("DEV_WH") + expect(scaleDown!.current_size).toBe("Large") + expect(scaleDown!.suggested_size).toBe("Medium") + 
}) + + test("SCALE_DOWN not suggested when already at X-Small", () => { + const loadData = [ + { warehouse_name: "TINY_WH", avg_queue_load: 0.0, peak_queue_load: 0.0, avg_concurrency: 0.01, sample_count: 10 }, + ] + const sizeByWarehouse = new Map([["TINY_WH", "X-Small"]]) + const recs = generateSizingRecommendations(loadData, [], sizeByWarehouse) + + const scaleDown = recs.find((r: any) => r.type === "SCALE_DOWN") + expect(scaleDown).toBeUndefined() + }) + + test("multiple warehouses can produce multiple different recommendations", () => { + const loadData = [ + { warehouse_name: "HOT_WH", avg_queue_load: 3.0, peak_queue_load: 10.0, avg_concurrency: 8.0, sample_count: 500 }, + { warehouse_name: "COLD_WH", avg_queue_load: 0.0, peak_queue_load: 0.0, avg_concurrency: 0.02, sample_count: 10 }, + ] + const sizeByWarehouse = new Map([["HOT_WH", "Medium"], ["COLD_WH", "Large"]]) + const recs = generateSizingRecommendations(loadData, [], sizeByWarehouse) + + expect(recs.some((r: any) => r.type === "SCALE_UP" && r.warehouse === "HOT_WH")).toBe(true) + expect(recs.some((r: any) => r.type === "SCALE_DOWN" && r.warehouse === "COLD_WH")).toBe(true) + }) + + test("falls back to 'unknown' when sizeByWarehouse has no entry", () => { + const loadData = [ + { warehouse_name: "MYSTERY_WH", avg_queue_load: 2.0, peak_queue_load: 3.0, avg_concurrency: 1.0, sample_count: 100 }, + ] + const sizeByWarehouse = new Map() + const recs = generateSizingRecommendations(loadData, [], sizeByWarehouse) + + const scaleUp = recs.find((r: any) => r.type === "SCALE_UP") + expect(scaleUp).toBeDefined() + expect(scaleUp!.current_size).toBe("unknown") + }) +}) diff --git a/packages/opencode/test/bus/bus.test.ts b/packages/opencode/test/bus/bus.test.ts new file mode 100644 index 0000000000..1803ecfd15 --- /dev/null +++ b/packages/opencode/test/bus/bus.test.ts @@ -0,0 +1,127 @@ +import { describe, test, expect } from "bun:test" +import z from "zod" +import { Bus } from "../../src/bus" +import { BusEvent } 
from "../../src/bus/bus-event" +import { Instance } from "../../src/project/instance" +import { tmpdir } from "../fixture/fixture" + +const TestEvent = BusEvent.define("__test_bus_pub_sub", z.object({ value: z.string() })) +const OtherEvent = BusEvent.define("__test_bus_other_type", z.object({ n: z.number() })) + +describe("Bus: publish and subscribe", () => { + test("subscriber receives published event", async () => { + await using tmp = await tmpdir({ git: true }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const received: any[] = [] + const unsub = Bus.subscribe(TestEvent, (e) => received.push(e)) + await Bus.publish(TestEvent, { value: "hello" }) + expect(received).toHaveLength(1) + expect(received[0].properties.value).toBe("hello") + unsub() + }, + }) + }) + + test("unsubscribe stops receiving events", async () => { + await using tmp = await tmpdir({ git: true }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const received: any[] = [] + const unsub = Bus.subscribe(TestEvent, (e) => received.push(e)) + await Bus.publish(TestEvent, { value: "first" }) + unsub() + await Bus.publish(TestEvent, { value: "second" }) + expect(received).toHaveLength(1) + }, + }) + }) + + test("multiple subscribers receive the same event", async () => { + await using tmp = await tmpdir({ git: true }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const a: any[] = [] + const b: any[] = [] + const unsub1 = Bus.subscribe(TestEvent, (e) => a.push(e)) + const unsub2 = Bus.subscribe(TestEvent, (e) => b.push(e)) + await Bus.publish(TestEvent, { value: "shared" }) + expect(a).toHaveLength(1) + expect(b).toHaveLength(1) + unsub1() + unsub2() + }, + }) + }) + + test("subscriber only receives matching event type", async () => { + await using tmp = await tmpdir({ git: true }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const received: any[] = [] + const unsub = Bus.subscribe(TestEvent, (e) => 
received.push(e)) + await Bus.publish(OtherEvent, { n: 42 }) + expect(received).toHaveLength(0) + unsub() + }, + }) + }) +}) + +describe("Bus: subscribeAll wildcard", () => { + test("wildcard subscriber receives all event types", async () => { + await using tmp = await tmpdir({ git: true }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const received: any[] = [] + const unsub = Bus.subscribeAll((e) => received.push(e)) + await Bus.publish(TestEvent, { value: "a" }) + await Bus.publish(OtherEvent, { n: 1 }) + expect(received).toHaveLength(2) + unsub() + }, + }) + }) +}) + +describe("Bus: once", () => { + test("once unsubscribes after callback returns 'done'", async () => { + await using tmp = await tmpdir({ git: true }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + let count = 0 + Bus.once(TestEvent, () => { + count++ + return "done" + }) + await Bus.publish(TestEvent, { value: "first" }) + await Bus.publish(TestEvent, { value: "second" }) + expect(count).toBe(1) + }, + }) + }) + + test("once continues if callback returns undefined", async () => { + await using tmp = await tmpdir({ git: true }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + let count = 0 + Bus.once(TestEvent, () => { + count++ + return count >= 2 ? "done" : undefined + }) + await Bus.publish(TestEvent, { value: "1" }) + await Bus.publish(TestEvent, { value: "2" }) + await Bus.publish(TestEvent, { value: "3" }) + expect(count).toBe(2) + }, + }) + }) +}) diff --git a/packages/opencode/test/cli/stats.test.ts b/packages/opencode/test/cli/stats.test.ts new file mode 100644 index 0000000000..0b176ee8fe --- /dev/null +++ b/packages/opencode/test/cli/stats.test.ts @@ -0,0 +1,185 @@ +/** + * Tests for `altimate-code stats` display formatting. + * + * displayStats() is the primary user-facing output for the stats command. + * formatNumber (module-private) converts token counts to human-readable + * format (e.g., 1500 → "1.5K"). 
These tests verify formatting via the + * exported displayStats function to catch regressions in CLI output. + */ +import { describe, test, expect } from "bun:test" +import { displayStats } from "../../src/cli/cmd/stats" + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +/** Capture console.log output from a synchronous function. */ +function captureOutput(fn: () => void): string { + const lines: string[] = [] + const origLog = console.log + // displayStats also uses process.stdout.write for ANSI cursor movement + // in the model-usage section — we skip that branch by not passing modelLimit. + console.log = (...args: unknown[]) => lines.push(args.join(" ")) + try { + fn() + } finally { + console.log = origLog + } + return lines.join("\n") +} + +/** Minimal valid SessionStats — all zeroes. */ +function emptyStats() { + return { + totalSessions: 0, + totalMessages: 0, + totalCost: 0, + totalTokens: { input: 0, output: 0, reasoning: 0, cache: { read: 0, write: 0 } }, + toolUsage: {} as Record, + modelUsage: {} as Record, + dateRange: { earliest: Date.now(), latest: Date.now() }, + days: 1, + costPerDay: 0, + tokensPerSession: 0, + medianTokensPerSession: 0, + } +} + +// --------------------------------------------------------------------------- +// formatNumber via displayStats +// --------------------------------------------------------------------------- + +describe("stats: formatNumber rendering", () => { + test("values under 1000 display as plain integer", () => { + const stats = emptyStats() + stats.totalTokens.input = 999 + const out = captureOutput(() => displayStats(stats)) + expect(out).toContain("999") + // Should not be formatted with K or M suffix + expect(out).not.toMatch(/999.*K/) + }) + + test("exactly 1000 displays as 1.0K", () => { + const stats = emptyStats() + stats.totalTokens.input = 1000 + const out = captureOutput(() => 
displayStats(stats)) + expect(out).toContain("1.0K") + }) + + test("1500 displays as 1.5K", () => { + const stats = emptyStats() + stats.totalTokens.input = 1500 + const out = captureOutput(() => displayStats(stats)) + expect(out).toContain("1.5K") + }) + + test("exactly 1000000 displays as 1.0M", () => { + const stats = emptyStats() + stats.totalTokens.input = 1_000_000 + const out = captureOutput(() => displayStats(stats)) + expect(out).toContain("1.0M") + }) + + test("2500000 displays as 2.5M", () => { + const stats = emptyStats() + stats.totalTokens.input = 2_500_000 + const out = captureOutput(() => displayStats(stats)) + expect(out).toContain("2.5M") + }) + + test("zero displays as 0", () => { + const stats = emptyStats() + const out = captureOutput(() => displayStats(stats)) + // Input line should show 0, not "0K" or empty + expect(out).toMatch(/Input\s+0\s/) + }) +}) + +// --------------------------------------------------------------------------- +// displayStats: cost and NaN safety +// --------------------------------------------------------------------------- + +describe("stats: cost display safety", () => { + test("zero cost renders as $0.00, never NaN", () => { + const stats = emptyStats() + const out = captureOutput(() => displayStats(stats)) + expect(out).not.toContain("NaN") + expect(out).toContain("$0.00") + }) + + test("fractional cost renders with two decimal places", () => { + const stats = emptyStats() + stats.totalCost = 1.234 + stats.costPerDay = 0.617 + const out = captureOutput(() => displayStats(stats)) + expect(out).toContain("$1.23") + expect(out).toContain("$0.62") + }) +}) + +// --------------------------------------------------------------------------- +// displayStats: tool usage rendering +// --------------------------------------------------------------------------- + +describe("stats: tool usage display", () => { + test("tool usage shows bar chart with percentages", () => { + const stats = emptyStats() + stats.toolUsage = { read: 
50, write: 30, bash: 20 } + const out = captureOutput(() => displayStats(stats)) + expect(out).toContain("TOOL USAGE") + expect(out).toContain("read") + expect(out).toContain("write") + expect(out).toContain("bash") + // Percentages should be present + expect(out).toContain("%") + }) + + test("tool limit restricts number of tools shown", () => { + const stats = emptyStats() + stats.toolUsage = { read: 50, write: 30, bash: 20, edit: 10, glob: 5 } + const out = captureOutput(() => displayStats(stats, 2)) + // Only top 2 tools should appear (read and write by count) + expect(out).toContain("read") + expect(out).toContain("write") + expect(out).not.toContain("glob") + }) + + test("empty tool usage omits TOOL USAGE section", () => { + const stats = emptyStats() + stats.toolUsage = {} + const out = captureOutput(() => displayStats(stats)) + expect(out).not.toContain("TOOL USAGE") + }) + + test("long tool names are truncated", () => { + const stats = emptyStats() + stats.toolUsage = { "a_very_long_tool_name_that_exceeds_limit": 10 } + const out = captureOutput(() => displayStats(stats)) + // Tool name should be truncated to fit the column + expect(out).toContain("..") + }) +}) + +// --------------------------------------------------------------------------- +// displayStats: overview section +// --------------------------------------------------------------------------- + +describe("stats: overview section", () => { + test("renders session and message counts", () => { + const stats = emptyStats() + stats.totalSessions = 42 + stats.totalMessages = 1337 + const out = captureOutput(() => displayStats(stats)) + expect(out).toContain("OVERVIEW") + expect(out).toContain("42") + expect(out).toContain("1,337") + }) + + test("renders box-drawing borders", () => { + const stats = emptyStats() + const out = captureOutput(() => displayStats(stats)) + expect(out).toContain("┌") + expect(out).toContain("┘") + expect(out).toContain("│") + }) +}) diff --git 
a/packages/opencode/test/control-plane/sse.test.ts b/packages/opencode/test/control-plane/sse.test.ts index 78a8341c0e..78f96cfe8e 100644 --- a/packages/opencode/test/control-plane/sse.test.ts +++ b/packages/opencode/test/control-plane/sse.test.ts @@ -53,4 +53,83 @@ describe("control-plane/sse", () => { }, ]) }) + + test("handles events split across chunk boundaries", async () => { + const events: unknown[] = [] + const stop = new AbortController() + + await parseSSE( + stream(['data: {"type":"spl', 'it"}\n\n']), + stop.signal, + (event) => events.push(event), + ) + + expect(events).toEqual([{ type: "split" }]) + }) + + test("handles double newline split across chunks", async () => { + const events: unknown[] = [] + const stop = new AbortController() + + await parseSSE( + stream(['data: {"type":"boundary"}\n', '\ndata: {"type":"next"}\n\n']), + stop.signal, + (event) => events.push(event), + ) + + expect(events).toEqual([{ type: "boundary" }, { type: "next" }]) + }) + + test("ignores empty events (double newline with no data)", async () => { + const events: unknown[] = [] + const stop = new AbortController() + + await parseSSE( + stream(['\n\ndata: {"type":"real"}\n\n']), + stop.signal, + (event) => events.push(event), + ) + + expect(events).toEqual([{ type: "real" }]) + }) + + test("abort signal stops processing mid-stream", async () => { + const events: unknown[] = [] + const stop = new AbortController() + + // Stream that delivers chunks on demand via pull(); abort fires + // between the first and second read. 
+ let pullCount = 0 + const body = new ReadableStream({ + pull(controller) { + const encoder = new TextEncoder() + pullCount++ + if (pullCount === 1) { + controller.enqueue(encoder.encode('data: {"type":"first"}\n\n')) + // Abort before next pull delivers second event + stop.abort() + } else { + controller.enqueue(encoder.encode('data: {"type":"second"}\n\n')) + controller.close() + } + }, + }) + + await parseSSE(body, stop.signal, (event) => events.push(event)) + + expect(events).toEqual([{ type: "first" }]) + }) + + test("handles bare \\r line endings", async () => { + const events: unknown[] = [] + const stop = new AbortController() + + await parseSSE( + stream(['data: {"type":"cr"}\r\r']), + stop.signal, + (event) => events.push(event), + ) + + expect(events).toEqual([{ type: "cr" }]) + }) }) diff --git a/packages/opencode/test/mcp/auth.test.ts b/packages/opencode/test/mcp/auth.test.ts new file mode 100644 index 0000000000..c067cb7f31 --- /dev/null +++ b/packages/opencode/test/mcp/auth.test.ts @@ -0,0 +1,308 @@ +/** + * Tests for McpAuth (src/mcp/auth.ts) — credential persistence, URL validation, + * and token expiry logic. + * + * These functions are the security gate for MCP OAuth: getForUrl() prevents + * credentials from being sent to the wrong server when a user reconfigures + * their MCP server URL, and isTokenExpired() controls token refresh decisions. + * + * Also tests McpOAuthProvider.clientInformation() expiry handling from + * src/mcp/oauth-provider.ts. 
+ */ + +import { describe, test, expect, afterEach } from "bun:test" +import { tmpdir } from "../fixture/fixture" + +const { McpAuth } = await import("../../src/mcp/auth") +const { McpOAuthProvider } = await import("../../src/mcp/oauth-provider") +const { Instance } = await import("../../src/project/instance") + +// --------------------------------------------------------------------------- +// McpAuth.getForUrl — URL validation for credential safety +// --------------------------------------------------------------------------- + +describe("McpAuth.getForUrl", () => { + test("returns undefined when no entry exists", async () => { + await using tmp = await tmpdir() + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const result = await McpAuth.getForUrl("nonexistent-server", "https://example.com/mcp") + expect(result).toBeUndefined() + }, + }) + }) + + test("returns undefined when entry has no serverUrl (old version migration)", async () => { + await using tmp = await tmpdir() + await Instance.provide({ + directory: tmp.path, + fn: async () => { + // Write an entry without serverUrl, simulating data from an older version + await McpAuth.set("legacy-server", { + tokens: { accessToken: "old-token" }, + }) + + const result = await McpAuth.getForUrl("legacy-server", "https://example.com/mcp") + expect(result).toBeUndefined() + }, + }) + }) + + test("returns undefined when URL has changed (credential safety)", async () => { + await using tmp = await tmpdir() + await Instance.provide({ + directory: tmp.path, + fn: async () => { + // Store credentials for server A + await McpAuth.set( + "my-mcp", + { + tokens: { accessToken: "token-for-server-a" }, + serverUrl: "https://server-a.example.com/mcp", + }, + "https://server-a.example.com/mcp", + ) + + // Try to get credentials for server B — must return undefined + const result = await McpAuth.getForUrl("my-mcp", "https://server-b.example.com/mcp") + expect(result).toBeUndefined() + }, + }) + }) + + 
test("returns the entry when URL matches exactly", async () => { + await using tmp = await tmpdir() + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const serverUrl = "https://my-server.example.com/mcp" + await McpAuth.set( + "my-mcp", + { + tokens: { accessToken: "valid-token", refreshToken: "refresh" }, + serverUrl, + }, + serverUrl, + ) + + const result = await McpAuth.getForUrl("my-mcp", serverUrl) + expect(result).toBeDefined() + expect(result!.tokens!.accessToken).toBe("valid-token") + expect(result!.serverUrl).toBe(serverUrl) + }, + }) + }) + + test("CRUD lifecycle: set → getForUrl → updateTokens → getForUrl", async () => { + await using tmp = await tmpdir() + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const name = "lifecycle-test" + const url = "https://lifecycle.example.com/mcp" + + // Set initial tokens + await McpAuth.updateTokens(name, { accessToken: "first-token" }, url) + + // Verify retrieval + const entry1 = await McpAuth.getForUrl(name, url) + expect(entry1).toBeDefined() + expect(entry1!.tokens!.accessToken).toBe("first-token") + + // Update tokens (same URL) + await McpAuth.updateTokens(name, { accessToken: "second-token" }, url) + const entry2 = await McpAuth.getForUrl(name, url) + expect(entry2!.tokens!.accessToken).toBe("second-token") + + // Different URL should not return the entry + const entry3 = await McpAuth.getForUrl(name, "https://different.example.com/mcp") + expect(entry3).toBeUndefined() + }, + }) + }) +}) + +// --------------------------------------------------------------------------- +// McpAuth.isTokenExpired — token expiry checking +// --------------------------------------------------------------------------- + +describe("McpAuth.isTokenExpired", () => { + const originalDateNow = Date.now + + afterEach(() => { + Date.now = originalDateNow + }) + + test("returns null when no tokens exist", async () => { + await using tmp = await tmpdir() + await Instance.provide({ + directory: 
tmp.path, + fn: async () => { + const result = await McpAuth.isTokenExpired("no-such-server") + expect(result).toBeNull() + }, + }) + }) + + test("returns false when tokens have no expiry (never expires)", async () => { + await using tmp = await tmpdir() + await Instance.provide({ + directory: tmp.path, + fn: async () => { + await McpAuth.updateTokens("no-expiry", { accessToken: "forever-token" }) + const result = await McpAuth.isTokenExpired("no-expiry") + expect(result).toBe(false) + }, + }) + }) + + test("returns false when token expiry is in the future", async () => { + await using tmp = await tmpdir() + await Instance.provide({ + directory: tmp.path, + fn: async () => { + // expiresAt is in Unix seconds; set to 1 hour from now + const futureExpiry = Date.now() / 1000 + 3600 + await McpAuth.updateTokens("valid-token", { + accessToken: "not-expired", + expiresAt: futureExpiry, + }) + const result = await McpAuth.isTokenExpired("valid-token") + expect(result).toBe(false) + }, + }) + }) + + test("returns true when token has expired", async () => { + await using tmp = await tmpdir() + await Instance.provide({ + directory: tmp.path, + fn: async () => { + // expiresAt is in Unix seconds; set to 1 hour ago + const pastExpiry = Date.now() / 1000 - 3600 + await McpAuth.updateTokens("expired-token", { + accessToken: "old-token", + expiresAt: pastExpiry, + }) + const result = await McpAuth.isTokenExpired("expired-token") + expect(result).toBe(true) + }, + }) + }) +}) + +// --------------------------------------------------------------------------- +// McpOAuthProvider.clientInformation() — client secret expiry detection +// --------------------------------------------------------------------------- + +describe("McpOAuthProvider.clientInformation", () => { + const originalDateNow = Date.now + + afterEach(() => { + Date.now = originalDateNow + }) + + test("returns config-based client info when clientId is set", async () => { + await using tmp = await tmpdir() + await 
Instance.provide({ + directory: tmp.path, + fn: async () => { + const provider = new McpOAuthProvider( + "config-client", + "https://example.com/mcp", + { clientId: "my-client-id", clientSecret: "my-secret" }, + { onRedirect: async () => {} }, + ) + + const info = await provider.clientInformation() + expect(info).toBeDefined() + expect(info!.client_id).toBe("my-client-id") + expect(info!.client_secret).toBe("my-secret") + }, + }) + }) + + test("returns stored client info when not expired", async () => { + await using tmp = await tmpdir() + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const serverUrl = "https://example.com/mcp" + + // Store client info with a future expiry, using the same serverUrl + await McpAuth.updateClientInfo( + "stored-client", + { + clientId: "dynamic-client-id", + clientSecret: "dynamic-secret", + clientSecretExpiresAt: Date.now() / 1000 + 86400, // expires in 24h + }, + serverUrl, + ) + + const provider = new McpOAuthProvider( + "stored-client", + serverUrl, + {}, // no config clientId — forces lookup from store + { onRedirect: async () => {} }, + ) + + const info = await provider.clientInformation() + expect(info).toBeDefined() + expect(info!.client_id).toBe("dynamic-client-id") + expect(info!.client_secret).toBe("dynamic-secret") + }, + }) + }) + + test("returns undefined when stored client secret has expired (triggers re-registration)", async () => { + await using tmp = await tmpdir() + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const serverUrl = "https://example.com/mcp" + + // Store client info with an expiry in the past, using the same serverUrl + await McpAuth.updateClientInfo( + "expired-client", + { + clientId: "old-client-id", + clientSecret: "old-secret", + clientSecretExpiresAt: Date.now() / 1000 - 3600, // expired 1 hour ago + }, + serverUrl, + ) + + const provider = new McpOAuthProvider( + "expired-client", + serverUrl, + {}, // no config clientId + { onRedirect: async () => 
{} }, + ) + + // Must return undefined so the SDK triggers dynamic registration + const info = await provider.clientInformation() + expect(info).toBeUndefined() + }, + }) + }) + + test("returns undefined when no stored credentials and no config (dynamic registration)", async () => { + await using tmp = await tmpdir() + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const provider = new McpOAuthProvider( + "brand-new-server", + "https://brand-new.example.com/mcp", + {}, // no config + { onRedirect: async () => {} }, + ) + + const info = await provider.clientInformation() + expect(info).toBeUndefined() + }, + }) + }) +}) diff --git a/packages/opencode/test/mcp/oauth-callback.test.ts b/packages/opencode/test/mcp/oauth-callback.test.ts new file mode 100644 index 0000000000..b248bb3e16 --- /dev/null +++ b/packages/opencode/test/mcp/oauth-callback.test.ts @@ -0,0 +1,137 @@ +/** + * Tests for MCP OAuth callback server — XSS prevention and HTTP behavior. + * + * The OAuth callback page renders error messages from external MCP servers. + * If escapeHtml (module-private) fails to sanitize these strings, a malicious + * server could inject scripts into the user's browser via error_description. + * + * Tests exercise the server at the HTTP level since escapeHtml is not exported. 
+ */
+import { describe, test, expect, afterEach, beforeEach } from "bun:test"
+
+const { McpOAuthCallback } = await import("../../src/mcp/oauth-callback")
+const { OAUTH_CALLBACK_PORT, OAUTH_CALLBACK_PATH } = await import("../../src/mcp/oauth-provider")
+
+const BASE_URL = `http://127.0.0.1:${OAUTH_CALLBACK_PORT}${OAUTH_CALLBACK_PATH}`
+
+beforeEach(async () => {
+  // Ensure clean state — stop any leftover server
+  await McpOAuthCallback.stop()
+  await McpOAuthCallback.ensureRunning()
+})
+
+afterEach(async () => {
+  await McpOAuthCallback.stop()
+})
+
+// ---------------------------------------------------------------------------
+// XSS prevention
+// ---------------------------------------------------------------------------
+
+describe("OAuth callback: XSS prevention in error page", () => {
+  test("escapes <script> tags in error_description", async () => {
+    const xss = '<script>alert("xss")</script>'
+    const url = `${BASE_URL}?error=access_denied&error_description=${encodeURIComponent(xss)}&state=test-state`
+    const res = await fetch(url)
+    const body = await res.text()
+
+    // The raw