diff --git a/apps/sim/app/workspace/[workspaceId]/tables/[tableId]/components/column-sidebar/column-sidebar.tsx b/apps/sim/app/workspace/[workspaceId]/tables/[tableId]/components/column-sidebar/column-sidebar.tsx
index cd72fe26d57..69fc07bcf02 100644
--- a/apps/sim/app/workspace/[workspaceId]/tables/[tableId]/components/column-sidebar/column-sidebar.tsx
+++ b/apps/sim/app/workspace/[workspaceId]/tables/[tableId]/components/column-sidebar/column-sidebar.tsx
@@ -765,8 +765,13 @@ export function ColumnSidebar({
   }
 
   const typeOptions = useMemo(
-    () => COLUMN_TYPE_OPTIONS.map((o) => ({ label: o.label, value: o.type, icon: o.icon })),
-    []
+    () =>
+      COLUMN_TYPE_OPTIONS.filter((o) => o.type !== 'workflow' || !!existingGroup).map((o) => ({
+        label: o.label,
+        value: o.type,
+        icon: o.icon,
+      })),
+    [existingGroup]
   )
 
   /**
diff --git a/apps/sim/lib/copilot/generated/tool-catalog-v1.ts b/apps/sim/lib/copilot/generated/tool-catalog-v1.ts
index 87966286490..5083432bd87 100644
--- a/apps/sim/lib/copilot/generated/tool-catalog-v1.ts
+++ b/apps/sim/lib/copilot/generated/tool-catalog-v1.ts
@@ -2792,15 +2792,6 @@ export const UserTable: ToolCatalogEntry = {
         type: 'object',
         description: 'Arguments for the operation',
         properties: {
-          autoRun: {
-            type: 'boolean',
-            description:
-              'Optional flag for add_workflow_group. When true, existing rows whose dependencies are already filled run immediately. Default false: groups are staged silently — call run_workflow_group when ready to fire rows. Set true if the user explicitly asked you to start runs.',
-          },
-          blockId: {
-            type: 'string',
-            description: 'Source block ID inside the workflow. Used by add_workflow_group_output.',
-          },
           column: {
             type: 'object',
             description: 'Column definition for add_column: { name, type, unique?, position? }',
@@ -2808,7 +2799,7 @@
           columnName: {
             type: 'string',
             description:
-              'Column name. Required for rename_column, update_column, and delete_workflow_group_output (the bound column to drop). Optional for add_workflow_group_output (auto-derived from path when omitted). Use columnNames array for batch delete_column.',
+              'Column name (required for rename_column, update_column; use columnNames array for batch delete_column)',
           },
           columnNames: {
             type: 'array',
@@ -2819,23 +2810,6 @@ data: {
             type: 'object',
             description: 'Row data as key-value pairs (required for insert_row, update_row)',
           },
-          dependencies: {
-            type: 'object',
-            description:
-              'Dependencies the workflow group requires before running a row. { columns?: string[] } lists input column names that must be filled; { workflowGroups?: string[] } lists other workflow group IDs whose outputs must complete first. Used by add_workflow_group and update_workflow_group.',
-            properties: {
-              columns: {
-                type: 'array',
-                description: 'Input column names that must be filled before the group runs.',
-                items: { type: 'string' },
-              },
-              workflowGroups: {
-                type: 'array',
-                description: 'Other workflow group IDs whose outputs must complete first.',
-                items: { type: 'string' },
-              },
-            },
-          },
           description: { type: 'string', description: "Table description (optional for 'create')" },
           fileId: {
             type: 'string',
@@ -2852,11 +2826,6 @@
             description:
               'MongoDB-style filter for query_rows, update_rows_by_filter, delete_rows_by_filter',
           },
-          groupId: {
-            type: 'string',
-            description:
-              'Workflow group ID (required for update_workflow_group, delete_workflow_group, add_workflow_group_output, delete_workflow_group_output, run_workflow_group).',
-          },
           limit: {
             type: 'number',
             description: 'Maximum rows to return or affect (optional, default 100)',
           },
@@ -2864,11 +2833,11 @@
           mapping: {
             type: 'object',
             description:
-              'Optional explicit CSV-header → table-column mapping for import_file, as { "csvHeader": "columnName" | null }. A string maps the CSV header to that table column; null skips that CSV header (it won\'t be imported); omit a header entirely to fall back to auto-mapping by sanitized name (case-insensitive).',
+              'Optional explicit CSV-header → table-column mapping for import_file, as { "csvHeader": "columnName" | null }. When omitted, headers are auto-matched by sanitized name (case-insensitive fallback). Use null to skip a CSV column.',
             additionalProperties: {
-              type: ['string', 'null'],
+              type: 'string',
               description:
-                "Target column name on the table. null skips that CSV header (it won't be imported); omit it entirely to fall back to auto-mapping.",
+                'Target column name on the table. Use null to skip this CSV header instead of a column name.',
             },
           },
           mode: {
@@ -2899,33 +2868,6 @@
             description:
               'Pipe query_rows results directly to a NEW workspace file. The format is auto-inferred from the file extension: .csv → CSV, .json → JSON, .md → Markdown, etc. Use .csv for tabular exports. Use a flat path like "files/export.csv" — nested paths are not supported.',
           },
-          outputs: {
-            type: 'array',
-            description:
-              "Outputs to surface as columns. Each entry maps a workflow block output to a table column: { blockId, path, columnName?, columnType? }. blockId is the source block; path is the dotted output path; columnName auto-derives from the path when omitted; columnType defaults from the leaf type when omitted. Used by add_workflow_group. (Also accepted by update_workflow_group for the UI's bulk replace, but the AI flow should use add_workflow_group_output / delete_workflow_group_output instead.) If unsure about valid (blockId, path) pairs, call list_workflow_outputs first — paths are validated against the live workflow and invalid picks return an error with the valid options. For Agent blocks with structured outputs, the structured fields appear as top-level paths (e.g. summary, industry); there is NO response.content path on a structured agent.",
-            items: {
-              type: 'object',
-              properties: {
-                blockId: { type: 'string', description: 'Source block ID inside the workflow.' },
-                columnName: {
-                  type: 'string',
-                  description:
-                    'Optional target column name. Auto-derived from the path when omitted.',
-                },
-                columnType: {
-                  type: 'string',
-                  description: 'Optional column type. Defaults from the leaf type when omitted.',
-                  enum: ['string', 'number', 'boolean', 'date', 'json'],
-                },
-                path: { type: 'string', description: 'Dotted output path on the block.' },
-              },
-              required: ['blockId', 'path'],
-            },
-          },
-          path: {
-            type: 'string',
-            description: 'Dotted output path on the block. Used by add_workflow_group_output.',
-          },
           position: {
             type: 'integer',
             description:
@@ -2939,36 +2881,21 @@
           },
           rowId: {
             type: 'string',
-            description:
-              "Row ID. Required for get_row, update_row, delete_row, and for cancel_table_runs when scope:'row'.",
+            description: 'Row ID (required for get_row, update_row, delete_row)',
           },
           rowIds: {
             type: 'array',
-            description:
-              'Array of row IDs. Used by batch_delete_rows (rows to delete) and run_workflow_group (optional row scope: when omitted, runs across the whole table; when provided, only these rows are candidates and the eligibility predicate still applies — mid-run rows or rows with unmet deps are silently skipped).',
-            items: { type: 'string' },
+            description: 'Array of row IDs to delete (for batch_delete_rows)',
           },
           rows: {
             type: 'array',
             description: 'Array of row data objects (required for batch_insert_rows)',
           },
-          runMode: {
-            type: 'string',
-            description:
-              "Run mode for run_workflow_group. 'incomplete' (default) re-runs only rows that never produced output or last failed; 'all' re-runs every dep-satisfied row.",
-            enum: ['incomplete', 'all'],
-          },
           schema: {
             type: 'object',
             description:
               "Table schema with columns array (required for 'create'). Each column: { name, type, unique? }",
           },
-          scope: {
-            type: 'string',
-            description:
-              "Cancellation scope for cancel_table_runs. 'all' cancels in-flight runs across the whole table; 'row' cancels only the row identified by rowId.",
-            enum: ['all', 'row'],
-          },
           sort: {
             type: 'object',
             description:
@@ -2998,11 +2925,6 @@
             description:
               'Map of rowId to value for single-column batch update: { "rowId1": val1, "rowId2": val2 } (for batch_update_rows with columnName)',
           },
-          workflowId: {
-            type: 'string',
-            description:
-              'ID of the workflow (required for add_workflow_group and list_workflow_outputs).',
-          },
         },
       },
       operation: {
@@ -3029,14 +2951,6 @@
           'rename_column',
           'delete_column',
           'update_column',
-          'add_workflow_group',
-          'update_workflow_group',
-          'delete_workflow_group',
-          'add_workflow_group_output',
-          'delete_workflow_group_output',
-          'run_workflow_group',
-          'cancel_table_runs',
-          'list_workflow_outputs',
         ],
       },
     },
@@ -3375,14 +3289,6 @@
   renameColumn: 'rename_column',
   deleteColumn: 'delete_column',
   updateColumn: 'update_column',
-  addWorkflowGroup: 'add_workflow_group',
-  updateWorkflowGroup: 'update_workflow_group',
-  deleteWorkflowGroup: 'delete_workflow_group',
-  addWorkflowGroupOutput: 'add_workflow_group_output',
-  deleteWorkflowGroupOutput: 'delete_workflow_group_output',
-  runWorkflowGroup: 'run_workflow_group',
-  cancelTableRuns: 'cancel_table_runs',
-  listWorkflowOutputs: 'list_workflow_outputs',
 } as const
 
 export type UserTableOperation = (typeof UserTableOperation)[keyof typeof UserTableOperation]
@@ -3408,14 +3314,6 @@
   UserTableOperation.renameColumn,
   UserTableOperation.deleteColumn,
   UserTableOperation.updateColumn,
-  UserTableOperation.addWorkflowGroup,
-  UserTableOperation.updateWorkflowGroup,
-  UserTableOperation.deleteWorkflowGroup,
-  UserTableOperation.addWorkflowGroupOutput,
-  UserTableOperation.deleteWorkflowGroupOutput,
-  UserTableOperation.runWorkflowGroup,
-  UserTableOperation.cancelTableRuns,
-  UserTableOperation.listWorkflowOutputs,
 ] as const
 
 export const WorkspaceFileOperation = {
diff --git a/apps/sim/lib/copilot/generated/tool-schemas-v1.ts b/apps/sim/lib/copilot/generated/tool-schemas-v1.ts
index bf53f649415..5a2e2a196d8 100644
--- a/apps/sim/lib/copilot/generated/tool-schemas-v1.ts
+++ b/apps/sim/lib/copilot/generated/tool-schemas-v1.ts
@@ -2594,16 +2594,6 @@ export const TOOL_RUNTIME_SCHEMAS: Record = {
           type: 'object',
           description: 'Arguments for the operation',
           properties: {
-            autoRun: {
-              type: 'boolean',
-              description:
-                'Optional flag for add_workflow_group. When true, existing rows whose dependencies are already filled run immediately. Default false: groups are staged silently — call run_workflow_group when ready to fire rows. Set true if the user explicitly asked you to start runs.',
-            },
-            blockId: {
-              type: 'string',
-              description:
-                'Source block ID inside the workflow. Used by add_workflow_group_output.',
-            },
             column: {
               type: 'object',
               description: 'Column definition for add_column: { name, type, unique?, position? }',
@@ -2611,7 +2601,7 @@
             columnName: {
               type: 'string',
               description:
-                'Column name. Required for rename_column, update_column, and delete_workflow_group_output (the bound column to drop). Optional for add_workflow_group_output (auto-derived from path when omitted). Use columnNames array for batch delete_column.',
+                'Column name (required for rename_column, update_column; use columnNames array for batch delete_column)',
             },
             columnNames: {
               type: 'array',
@@ -2622,27 +2612,6 @@ data: {
               type: 'object',
               description: 'Row data as key-value pairs (required for insert_row, update_row)',
             },
-            dependencies: {
-              type: 'object',
-              description:
-                'Dependencies the workflow group requires before running a row. { columns?: string[] } lists input column names that must be filled; { workflowGroups?: string[] } lists other workflow group IDs whose outputs must complete first. Used by add_workflow_group and update_workflow_group.',
-              properties: {
-                columns: {
-                  type: 'array',
-                  description: 'Input column names that must be filled before the group runs.',
-                  items: {
-                    type: 'string',
-                  },
-                },
-                workflowGroups: {
-                  type: 'array',
-                  description: 'Other workflow group IDs whose outputs must complete first.',
-                  items: {
-                    type: 'string',
-                  },
-                },
-              },
-            },
             description: {
               type: 'string',
               description: "Table description (optional for 'create')",
             },
             fileId: {
               type: 'string',
@@ -2662,11 +2631,6 @@
               description:
                 'MongoDB-style filter for query_rows, update_rows_by_filter, delete_rows_by_filter',
             },
-            groupId: {
-              type: 'string',
-              description:
-                'Workflow group ID (required for update_workflow_group, delete_workflow_group, add_workflow_group_output, delete_workflow_group_output, run_workflow_group).',
-            },
             limit: {
               type: 'number',
               description: 'Maximum rows to return or affect (optional, default 100)',
             },
@@ -2674,11 +2638,11 @@
             mapping: {
               type: 'object',
               description:
-                'Optional explicit CSV-header → table-column mapping for import_file, as { "csvHeader": "columnName" | null }. A string maps the CSV header to that table column; null skips that CSV header (it won\'t be imported); omit a header entirely to fall back to auto-mapping by sanitized name (case-insensitive).',
+                'Optional explicit CSV-header → table-column mapping for import_file, as { "csvHeader": "columnName" | null }. When omitted, headers are auto-matched by sanitized name (case-insensitive fallback). Use null to skip a CSV column.',
               additionalProperties: {
-                type: ['string', 'null'],
+                type: 'string',
                 description:
-                  "Target column name on the table. null skips that CSV header (it won't be imported); omit it entirely to fall back to auto-mapping.",
+                  'Target column name on the table. Use null to skip this CSV header instead of a column name.',
               },
             },
             mode: {
@@ -2715,39 +2679,6 @@
              description:
                'Pipe query_rows results directly to a NEW workspace file. The format is auto-inferred from the file extension: .csv → CSV, .json → JSON, .md → Markdown, etc. Use .csv for tabular exports. Use a flat path like "files/export.csv" — nested paths are not supported.',
             },
-            outputs: {
-              type: 'array',
-              description:
-                "Outputs to surface as columns. Each entry maps a workflow block output to a table column: { blockId, path, columnName?, columnType? }. blockId is the source block; path is the dotted output path; columnName auto-derives from the path when omitted; columnType defaults from the leaf type when omitted. Used by add_workflow_group. (Also accepted by update_workflow_group for the UI's bulk replace, but the AI flow should use add_workflow_group_output / delete_workflow_group_output instead.) If unsure about valid (blockId, path) pairs, call list_workflow_outputs first — paths are validated against the live workflow and invalid picks return an error with the valid options. For Agent blocks with structured outputs, the structured fields appear as top-level paths (e.g. summary, industry); there is NO response.content path on a structured agent.",
-              items: {
-                type: 'object',
-                properties: {
-                  blockId: {
-                    type: 'string',
-                    description: 'Source block ID inside the workflow.',
-                  },
-                  columnName: {
-                    type: 'string',
-                    description:
-                      'Optional target column name. Auto-derived from the path when omitted.',
-                  },
-                  columnType: {
-                    type: 'string',
-                    description: 'Optional column type. Defaults from the leaf type when omitted.',
-                    enum: ['string', 'number', 'boolean', 'date', 'json'],
-                  },
-                  path: {
-                    type: 'string',
-                    description: 'Dotted output path on the block.',
-                  },
-                },
-                required: ['blockId', 'path'],
-              },
-            },
-            path: {
-              type: 'string',
-              description: 'Dotted output path on the block. Used by add_workflow_group_output.',
-            },
             position: {
               type: 'integer',
               description:
@@ -2763,38 +2694,21 @@
             },
             rowId: {
               type: 'string',
-              description:
-                "Row ID. Required for get_row, update_row, delete_row, and for cancel_table_runs when scope:'row'.",
+              description: 'Row ID (required for get_row, update_row, delete_row)',
             },
             rowIds: {
               type: 'array',
-              description:
-                'Array of row IDs. Used by batch_delete_rows (rows to delete) and run_workflow_group (optional row scope: when omitted, runs across the whole table; when provided, only these rows are candidates and the eligibility predicate still applies — mid-run rows or rows with unmet deps are silently skipped).',
-              items: {
-                type: 'string',
-              },
+              description: 'Array of row IDs to delete (for batch_delete_rows)',
             },
             rows: {
               type: 'array',
               description: 'Array of row data objects (required for batch_insert_rows)',
             },
-            runMode: {
-              type: 'string',
-              description:
-                "Run mode for run_workflow_group. 'incomplete' (default) re-runs only rows that never produced output or last failed; 'all' re-runs every dep-satisfied row.",
-              enum: ['incomplete', 'all'],
-            },
             schema: {
               type: 'object',
               description:
                 "Table schema with columns array (required for 'create'). Each column: { name, type, unique? }",
             },
-            scope: {
-              type: 'string',
-              description:
-                "Cancellation scope for cancel_table_runs. 'all' cancels in-flight runs across the whole table; 'row' cancels only the row identified by rowId.",
-              enum: ['all', 'row'],
-            },
             sort: {
               type: 'object',
               description:
@@ -2826,11 +2740,6 @@
               description:
                 'Map of rowId to value for single-column batch update: { "rowId1": val1, "rowId2": val2 } (for batch_update_rows with columnName)',
             },
-            workflowId: {
-              type: 'string',
-              description:
-                'ID of the workflow (required for add_workflow_group and list_workflow_outputs).',
-            },
           },
         },
         operation: {
@@ -2857,14 +2766,6 @@
           'rename_column',
           'delete_column',
           'update_column',
-          'add_workflow_group',
-          'update_workflow_group',
-          'delete_workflow_group',
-          'add_workflow_group_output',
-          'delete_workflow_group_output',
-          'run_workflow_group',
-          'cancel_table_runs',
-          'list_workflow_outputs',
         ],
       },
diff --git a/apps/sim/lib/copilot/vfs/file-reader.test.ts b/apps/sim/lib/copilot/vfs/file-reader.test.ts
index 1e202d77d5f..367861d038f 100644
--- a/apps/sim/lib/copilot/vfs/file-reader.test.ts
+++ b/apps/sim/lib/copilot/vfs/file-reader.test.ts
@@ -28,67 +28,79 @@ async function makeNoisePng(width: number, height: number): Promise<Buffer> {
     .toBuffer()
 }
 
+// Both tests do real sharp work (encode + metadata read) that can exceed the
+// default 10s timeout when CI runs them alongside thousands of other tests.
+const SHARP_TEST_TIMEOUT_MS = 30_000
+
 describe('readFileRecord', () => {
-  it('returns small images as attachments without resize note', async () => {
-    const sharp = (await import('sharp')).default
-    const smallPng = await sharp({
-      create: {
-        width: 200,
-        height: 200,
-        channels: 3,
-        background: { r: 255, g: 0, b: 0 },
-      },
-    })
-      .png()
-      .toBuffer()
-
-    downloadWorkspaceFile.mockResolvedValue(smallPng)
-
-    const result = await readFileRecord({
-      id: 'wf_small',
-      workspaceId: 'ws_1',
-      name: 'small.png',
-      key: 'uploads/small.png',
-      path: '/api/files/serve/uploads%2Fsmall.png?context=mothership',
-      size: smallPng.length,
-      type: 'image/png',
-      uploadedBy: 'user_1',
-      uploadedAt: new Date(),
-      deletedAt: null,
-      storageContext: 'mothership',
-    })
-
-    expect(result?.attachment?.type).toBe('image')
-    expect(result?.attachment?.source.media_type).toBe('image/png')
-    expect(result?.content).not.toContain('resized for vision')
-    expect(Buffer.from(result?.attachment?.source.data ?? '', 'base64')).toEqual(smallPng)
-  })
-
-  it('downscales oversized images into attachments that fit the read limit', async () => {
-    const largePng = await makeNoisePng(1800, 1800)
-    expect(largePng.length).toBeGreaterThan(MAX_IMAGE_READ_BYTES)
-
-    downloadWorkspaceFile.mockResolvedValue(largePng)
-
-    const result = await readFileRecord({
-      id: 'wf_large',
-      workspaceId: 'ws_1',
-      name: 'chesspng.png',
-      key: 'uploads/chesspng.png',
-      path: '/api/files/serve/uploads%2Fchesspng.png?context=mothership',
-      size: largePng.length,
-      type: 'image/png',
-      uploadedBy: 'user_1',
-      uploadedAt: new Date(),
-      deletedAt: null,
-      storageContext: 'mothership',
-    })
-
-    expect(result?.attachment?.type).toBe('image')
-    expect(result?.content).toContain('resized for vision')
-
-    const decoded = Buffer.from(result?.attachment?.source.data ?? '', 'base64')
-    expect(decoded.length).toBeLessThanOrEqual(MAX_IMAGE_READ_BYTES)
-    expect(result?.attachment?.source.media_type).toMatch(/^image\/(jpeg|webp|png)$/)
-  })
+  it(
+    'returns small images as attachments without resize note',
+    async () => {
+      const sharp = (await import('sharp')).default
+      const smallPng = await sharp({
+        create: {
+          width: 200,
+          height: 200,
+          channels: 3,
+          background: { r: 255, g: 0, b: 0 },
+        },
+      })
+        .png()
+        .toBuffer()
+
+      downloadWorkspaceFile.mockResolvedValue(smallPng)
+
+      const result = await readFileRecord({
+        id: 'wf_small',
+        workspaceId: 'ws_1',
+        name: 'small.png',
+        key: 'uploads/small.png',
+        path: '/api/files/serve/uploads%2Fsmall.png?context=mothership',
+        size: smallPng.length,
+        type: 'image/png',
+        uploadedBy: 'user_1',
+        uploadedAt: new Date(),
+        deletedAt: null,
+        storageContext: 'mothership',
+      })
+
+      expect(result?.attachment?.type).toBe('image')
+      expect(result?.attachment?.source.media_type).toBe('image/png')
+      expect(result?.content).not.toContain('resized for vision')
+      expect(Buffer.from(result?.attachment?.source.data ?? '', 'base64')).toEqual(smallPng)
+    },
+    SHARP_TEST_TIMEOUT_MS
+  )
+
+  it(
+    'downscales oversized images into attachments that fit the read limit',
+    async () => {
+      const largePng = await makeNoisePng(1800, 1800)
+      expect(largePng.length).toBeGreaterThan(MAX_IMAGE_READ_BYTES)
+
+      downloadWorkspaceFile.mockResolvedValue(largePng)
+
+      const result = await readFileRecord({
+        id: 'wf_large',
+        workspaceId: 'ws_1',
+        name: 'chesspng.png',
+        key: 'uploads/chesspng.png',
+        path: '/api/files/serve/uploads%2Fchesspng.png?context=mothership',
+        size: largePng.length,
+        type: 'image/png',
+        uploadedBy: 'user_1',
+        uploadedAt: new Date(),
+        deletedAt: null,
+        storageContext: 'mothership',
+      })
+
+      expect(result?.attachment?.type).toBe('image')
+      expect(result?.content).toContain('resized for vision')
+
+      const decoded = Buffer.from(result?.attachment?.source.data ?? '', 'base64')
+      expect(decoded.length).toBeLessThanOrEqual(MAX_IMAGE_READ_BYTES)
+      expect(result?.attachment?.source.media_type).toMatch(/^image\/(jpeg|webp|png)$/)
+    },
+    SHARP_TEST_TIMEOUT_MS
+  )
 })