diff --git a/packages/opencode/test/altimate/dbt-manifest-edge.test.ts b/packages/opencode/test/altimate/dbt-manifest-edge.test.ts
new file mode 100644
index 000000000..1051cf2e8
--- /dev/null
+++ b/packages/opencode/test/altimate/dbt-manifest-edge.test.ts
@@ -0,0 +1,111 @@
+import { describe, test, expect, afterEach } from "bun:test"
+import fs from "fs"
+import path from "path"
+import os from "os"
+import { parseManifest } from "../../src/altimate/native/dbt/manifest"
+
+describe("dbt manifest parser: edge cases", () => {
+  const tmpFiles: string[] = []
+
+  function writeTmpManifest(content: string): string {
+    const tmpFile = path.join(os.tmpdir(), `manifest-edge-${Date.now()}-${Math.random().toString(36).slice(2)}.json`)
+    fs.writeFileSync(tmpFile, content)
+    tmpFiles.push(tmpFile)
+    return tmpFile
+  }
+
+  afterEach(() => {
+    for (const f of tmpFiles) {
+      try { fs.unlinkSync(f) } catch {}
+    }
+    tmpFiles.length = 0
+  })
+
+  test("handles invalid JSON gracefully", async () => {
+    const tmpFile = writeTmpManifest("not json at all {{{")
+    const result = await parseManifest({ path: tmpFile })
+    expect(result.models).toEqual([])
+    expect(result.model_count).toBe(0)
+  })
+
+  test("handles manifest with no nodes or sources keys", async () => {
+    const tmpFile = writeTmpManifest(JSON.stringify({ metadata: { dbt_version: "1.5.0" } }))
+    const result = await parseManifest({ path: tmpFile })
+    expect(result.models).toEqual([])
+    expect(result.sources).toEqual([])
+    expect(result.model_count).toBe(0)
+    expect(result.source_count).toBe(0)
+  })
+
+  test("handles model with empty columns dict", async () => {
+    const manifest = {
+      nodes: {
+        "model.my_project.my_model": {
+          resource_type: "model",
+          name: "my_model",
+          schema: "public",
+          database: "analytics",
+          config: { materialized: "table" },
+          depends_on: { nodes: [] },
+          columns: {},
+        },
+      },
+      sources: {},
+    }
+    const tmpFile = writeTmpManifest(JSON.stringify(manifest))
+    const result = await parseManifest({ path: tmpFile })
+    expect(result.model_count).toBe(1)
+    expect(result.models[0].columns).toEqual([])
+    expect(result.models[0].name).toBe("my_model")
+  })
+
+  test("handles model missing depends_on entirely", async () => {
+    const manifest = {
+      nodes: {
+        "model.project.orphan": {
+          resource_type: "model",
+          name: "orphan",
+          columns: {},
+        },
+      },
+      sources: {},
+    }
+    const tmpFile = writeTmpManifest(JSON.stringify(manifest))
+    const result = await parseManifest({ path: tmpFile })
+    expect(result.model_count).toBe(1)
+    expect(result.models[0].depends_on).toEqual([])
+  })
+
+  test("handles null manifest content (JSON null)", async () => {
+    const tmpFile = writeTmpManifest("null")
+    const result = await parseManifest({ path: tmpFile })
+    expect(result.models).toEqual([])
+  })
+
+  test("extracts source columns with type fallback", async () => {
+    const manifest = {
+      nodes: {},
+      sources: {
+        "source.project.raw.orders": {
+          name: "orders",
+          source_name: "raw",
+          schema: "raw_data",
+          database: "warehouse",
+          columns: {
+            id: { name: "id", data_type: "INTEGER", description: "Primary key" },
+            created_at: { name: "created_at", type: "TIMESTAMP" },
+          },
+        },
+      },
+    }
+    const tmpFile = writeTmpManifest(JSON.stringify(manifest))
+    const result = await parseManifest({ path: tmpFile })
+    expect(result.source_count).toBe(1)
+    expect(result.sources[0].name).toBe("orders")
+    expect(result.sources[0].source_name).toBe("raw")
+    expect(result.sources[0].columns).toHaveLength(2)
+    expect(result.sources[0].columns[0].data_type).toBe("INTEGER")
+    // Second column uses "type" fallback instead of "data_type"
+    expect(result.sources[0].columns[1].data_type).toBe("TIMESTAMP")
+  })
+})
diff --git a/packages/opencode/test/altimate/finops-recommendations.test.ts b/packages/opencode/test/altimate/finops-recommendations.test.ts
new file mode 100644
index 000000000..931ec3b77
--- /dev/null
+++ b/packages/opencode/test/altimate/finops-recommendations.test.ts
@@ -0,0 +1,83 @@
+import { describe, test, expect } from "bun:test"
+import { SQL_TEMPLATES as AdvisorTemplates } from "../../src/altimate/native/finops/warehouse-advisor"
+
+describe("FinOps: warehouse-advisor generateSizingRecommendations", () => {
+  const { generateSizingRecommendations } = AdvisorTemplates
+
+  test("SCALE_UP when avg_queue_load > 1.0", () => {
+    const loadData = [
+      { warehouse_name: "ANALYTICS_WH", avg_queue_load: 2.5, peak_queue_load: 8.0, avg_concurrency: 5.0, sample_count: 100 },
+    ]
+    const sizeByWarehouse = new Map([["ANALYTICS_WH", "Medium"]])
+    const recs = generateSizingRecommendations(loadData, [], sizeByWarehouse)
+
+    const scaleUp = recs.find((r: any) => r.type === "SCALE_UP")
+    expect(scaleUp).toBeDefined()
+    expect(scaleUp!.warehouse).toBe("ANALYTICS_WH")
+    expect(scaleUp!.current_size).toBe("Medium")
+    expect(scaleUp!.impact).toBe("high")
+    expect((scaleUp!.message as string)).toContain("2.5")
+  })
+
+  test("BURST_SCALING when peak_queue_load > 5.0 but avg_queue_load <= 1.0", () => {
+    const loadData = [
+      { warehouse_name: "ETL_WH", avg_queue_load: 0.5, peak_queue_load: 12.0, avg_concurrency: 3.0, sample_count: 50 },
+    ]
+    const sizeByWarehouse = new Map([["ETL_WH", "Large"]])
+    const recs = generateSizingRecommendations(loadData, [], sizeByWarehouse)
+
+    const burst = recs.find((r: any) => r.type === "BURST_SCALING")
+    expect(burst).toBeDefined()
+    expect(burst!.warehouse).toBe("ETL_WH")
+    expect(burst!.impact).toBe("medium")
+  })
+
+  test("SCALE_DOWN when avg_concurrency < 0.1 and avg_queue < 0.01 and size > X-Small", () => {
+    const loadData = [
+      { warehouse_name: "DEV_WH", avg_queue_load: 0.001, peak_queue_load: 0.01, avg_concurrency: 0.05, sample_count: 200 },
+    ]
+    const sizeByWarehouse = new Map([["DEV_WH", "Large"]])
+    const recs = generateSizingRecommendations(loadData, [], sizeByWarehouse)
+
+    const scaleDown = recs.find((r: any) => r.type === "SCALE_DOWN")
+    expect(scaleDown).toBeDefined()
+    expect(scaleDown!.warehouse).toBe("DEV_WH")
+    expect(scaleDown!.current_size).toBe("Large")
+    expect(scaleDown!.suggested_size).toBe("Medium")
+  })
+
+  test("SCALE_DOWN not suggested when already at X-Small", () => {
+    const loadData = [
+      { warehouse_name: "TINY_WH", avg_queue_load: 0.0, peak_queue_load: 0.0, avg_concurrency: 0.01, sample_count: 10 },
+    ]
+    const sizeByWarehouse = new Map([["TINY_WH", "X-Small"]])
+    const recs = generateSizingRecommendations(loadData, [], sizeByWarehouse)
+
+    const scaleDown = recs.find((r: any) => r.type === "SCALE_DOWN")
+    expect(scaleDown).toBeUndefined()
+  })
+
+  test("multiple warehouses can produce multiple different recommendations", () => {
+    const loadData = [
+      { warehouse_name: "HOT_WH", avg_queue_load: 3.0, peak_queue_load: 10.0, avg_concurrency: 8.0, sample_count: 500 },
+      { warehouse_name: "COLD_WH", avg_queue_load: 0.0, peak_queue_load: 0.0, avg_concurrency: 0.02, sample_count: 10 },
+    ]
+    const sizeByWarehouse = new Map([["HOT_WH", "Medium"], ["COLD_WH", "Large"]])
+    const recs = generateSizingRecommendations(loadData, [], sizeByWarehouse)
+
+    expect(recs.some((r: any) => r.type === "SCALE_UP" && r.warehouse === "HOT_WH")).toBe(true)
+    expect(recs.some((r: any) => r.type === "SCALE_DOWN" && r.warehouse === "COLD_WH")).toBe(true)
+  })
+
+  test("falls back to 'unknown' when sizeByWarehouse has no entry", () => {
+    const loadData = [
+      { warehouse_name: "MYSTERY_WH", avg_queue_load: 2.0, peak_queue_load: 3.0, avg_concurrency: 1.0, sample_count: 100 },
+    ]
+    const sizeByWarehouse = new Map()
+    const recs = generateSizingRecommendations(loadData, [], sizeByWarehouse)
+
+    const scaleUp = recs.find((r: any) => r.type === "SCALE_UP")
+    expect(scaleUp).toBeDefined()
+    expect(scaleUp!.current_size).toBe("unknown")
+  })
+})