Skip to content

Commit 6829122

Browse files
Add tool to run selected rows
1 parent d6bf3cf commit 6829122

6 files changed

Lines changed: 44 additions & 12 deletions

File tree

apps/sim/app/api/table/[tableId]/groups/[groupId]/run/route.ts

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ export const POST = withRouteHandler(async (request: NextRequest, { params }: Ro
3434
const parsed = await parseRequest(runWorkflowGroupContract, request, { params })
3535
if (!parsed.success) return parsed.response
3636
const { tableId, groupId } = parsed.data.params
37-
const { workspaceId, mode } = parsed.data.body
37+
const { workspaceId, mode, rowIds } = parsed.data.body
3838

3939
const result = await checkAccess(tableId, authResult.userId, 'write')
4040
if (!result.ok) return accessError(result, requestId, tableId)
@@ -50,6 +50,7 @@ export const POST = withRouteHandler(async (request: NextRequest, { params }: Ro
5050
workspaceId,
5151
mode,
5252
requestId,
53+
rowIds,
5354
})
5455

5556
return NextResponse.json({ success: true, data: { triggered } })

apps/sim/lib/api/contracts/tables.ts

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -812,6 +812,11 @@ export const cancelTableRunsContract = defineRouteContract({
812812
export const runWorkflowGroupBodySchema = z.object({
813813
workspaceId: z.string().min(1, 'Workspace ID is required'),
814814
mode: z.enum(['all', 'incomplete']).default('all'),
815+
/** Optional row scope. When provided, only these rows are candidates — the
816+
* same eligibility predicate (deps satisfied, not in-flight, mode filter)
817+
* still applies, so a passed-in row that's mid-run or has unmet deps is
818+
* silently skipped. Omit to run across the entire table. */
819+
rowIds: z.array(z.string().min(1)).min(1).optional(),
815820
})
816821

817822
export const runWorkflowGroupContract = defineRouteContract({

apps/sim/lib/copilot/generated/tool-catalog-v1.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -789,7 +789,7 @@ export const UserTable: ToolCatalogEntry = {
789789
name: "user_table",
790790
route: "sim",
791791
mode: "async",
792-
parameters: {"type":"object","properties":{"args":{"type":"object","description":"Arguments for the operation","properties":{"autoRun":{"type":"boolean","description":"Optional flag for add_workflow_group. When true, existing rows whose dependencies are already filled run immediately. Default false: groups are staged silently — call run_workflow_group when ready to fire rows. Set true if the user explicitly asked you to start runs."},"column":{"type":"object","description":"Column definition for add_column: { name, type, unique?, position? }"},"columnName":{"type":"string","description":"Column name (required for rename_column, update_column; use columnNames array for batch delete_column)"},"columnNames":{"type":"array","description":"Array of column names to delete at once (for delete_column). Preferred over columnName when deleting multiple columns."},"data":{"type":"object","description":"Row data as key-value pairs (required for insert_row, update_row)"},"dependencies":{"type":"object","description":"Dependencies the workflow group requires before running a row. { columns?: string[] } lists input column names that must be filled; { workflowGroups?: string[] } lists other workflow group IDs whose outputs must complete first. Used by add_workflow_group and update_workflow_group.","properties":{"columns":{"type":"array","description":"Input column names that must be filled before the group runs.","items":{"type":"string"}},"workflowGroups":{"type":"array","description":"Other workflow group IDs whose outputs must complete first.","items":{"type":"string"}}}},"description":{"type":"string","description":"Table description (optional for 'create')"},"fileId":{"type":"string","description":"Canonical workspace file ID for create_from_file/import_file. Discover via read(\"files/{name}/meta.json\") or glob(\"files/by-id/*/meta.json\")."},"filePath":{"type":"string","description":"Legacy workspace file reference for create_from_file/import_file. 
Prefer fileId."},"filter":{"type":"object","description":"MongoDB-style filter for query_rows, update_rows_by_filter, delete_rows_by_filter"},"groupId":{"type":"string","description":"Workflow group ID (required for update_workflow_group, delete_workflow_group, run_workflow_group)."},"limit":{"type":"number","description":"Maximum rows to return or affect (optional, default 100)"},"mapping":{"type":"object","description":"Optional explicit CSV-header → table-column mapping for import_file, as { \"csvHeader\": \"columnName\" | null }. A string maps the CSV header to that table column; null skips that CSV header (it won't be imported); omit a header entirely to fall back to auto-mapping by sanitized name (case-insensitive).","additionalProperties":{"type":["string","null"],"description":"Target column name on the table. null skips that CSV header (it won't be imported); omit it entirely to fall back to auto-mapping."}},"mode":{"type":"string","description":"Mode flag. For import_file: 'append' (default) adds rows, 'replace' truncates existing rows in a transaction before inserting. For run_workflow_group: 'incomplete' (default) re-runs only rows that never produced output or last failed, 'all' re-runs every dep-satisfied row.","enum":["append","replace","all","incomplete"]},"name":{"type":"string","description":"Table name (required for 'create')"},"newName":{"type":"string","description":"New column name (required for rename_column)"},"newType":{"type":"string","description":"New column type (optional for update_column). Types: string, number, boolean, date, json"},"offset":{"type":"number","description":"Number of rows to skip (optional for query_rows, default 0)"},"outputFormat":{"type":"string","description":"Explicit format override for outputPath. Usually unnecessary — the file extension determines the format automatically. 
Only use this to force a different format than what the extension implies.","enum":["json","csv","txt","md","html"]},"outputPath":{"type":"string","description":"Pipe query_rows results directly to a NEW workspace file. The format is auto-inferred from the file extension: .csv → CSV, .json → JSON, .md → Markdown, etc. Use .csv for tabular exports. Use a flat path like \"files/export.csv\" — nested paths are not supported."},"outputs":{"type":"array","description":"Outputs to surface as columns. Each entry maps a workflow block output to a table column: { blockId, path, columnName?, columnType? }. blockId is the source block; path is the dotted output path; columnName auto-derives from the path when omitted; columnType defaults from the leaf type when omitted. Used by add_workflow_group and update_workflow_group. If unsure about valid (blockId, path) pairs, call list_workflow_outputs first — paths are validated against the live workflow and invalid picks return an error with the valid options. For Agent blocks with structured outputs, the structured fields appear as top-level paths (e.g. summary, industry); there is NO response.content path on a structured agent.","items":{"type":"object","properties":{"blockId":{"type":"string","description":"Source block ID inside the workflow."},"columnName":{"type":"string","description":"Optional target column name. Auto-derived from the path when omitted."},"columnType":{"type":"string","description":"Optional column type. Defaults from the leaf type when omitted.","enum":["string","number","boolean","date","json"]},"path":{"type":"string","description":"Dotted output path on the block."}},"required":["blockId","path"]}},"position":{"type":"integer","description":"Zero-based index at which to insert the row (optional, insert_row only). Rows at and below that index shift down. Omit to append at the end."},"positions":{"type":"array","description":"Per-row insertion indices for batch_insert_rows (optional). 
Must be the same length as rows and contain no duplicates. Values are final positions in the resulting table — lower-index shifts are applied automatically. Omit to append all rows at the end.","items":{"type":"integer"}},"rowId":{"type":"string","description":"Row ID (required for get_row, update_row, delete_row)"},"rowIds":{"type":"array","description":"Array of row IDs to delete (for batch_delete_rows)"},"rows":{"type":"array","description":"Array of row data objects (required for batch_insert_rows)"},"schema":{"type":"object","description":"Table schema with columns array (required for 'create'). Each column: { name, type, unique? }"},"scope":{"type":"string","description":"Cancellation scope for cancel_table_runs. 'all' cancels in-flight runs across the whole table; 'row' cancels only the row identified by rowId.","enum":["all","row"]},"sort":{"type":"object","description":"Sort specification as { field: 'asc' | 'desc' } (optional for query_rows)"},"tableId":{"type":"string","description":"Table ID (required for most operations except 'create' and batch 'delete')"},"tableIds":{"type":"array","description":"Array of table IDs (for batch delete)","items":{"type":"string"}},"unique":{"type":"boolean","description":"Set column unique constraint (optional for update_column)"},"updates":{"type":"array","description":"Array of per-row updates: [{ rowId, data: { col: val } }] (for batch_update_rows)"},"values":{"type":"object","description":"Map of rowId to value for single-column batch update: { \"rowId1\": val1, \"rowId2\": val2 } (for batch_update_rows with columnName)"},"workflowId":{"type":"string","description":"ID of the workflow (required for add_workflow_group and list_workflow_outputs)."}}},"operation":{"type":"string","description":"The operation to 
perform","enum":["create","create_from_file","import_file","get","get_schema","delete","insert_row","batch_insert_rows","get_row","query_rows","update_row","delete_row","update_rows_by_filter","delete_rows_by_filter","batch_update_rows","batch_delete_rows","add_column","rename_column","delete_column","update_column","add_workflow_group","update_workflow_group","delete_workflow_group","run_workflow_group","cancel_table_runs","list_workflow_outputs"]}},"required":["operation","args"]},
792+
parameters: {"type":"object","properties":{"args":{"type":"object","description":"Arguments for the operation","properties":{"autoRun":{"type":"boolean","description":"Optional flag for add_workflow_group. When true, existing rows whose dependencies are already filled run immediately. Default false: groups are staged silently — call run_workflow_group when ready to fire rows. Set true if the user explicitly asked you to start runs."},"column":{"type":"object","description":"Column definition for add_column: { name, type, unique?, position? }"},"columnName":{"type":"string","description":"Column name (required for rename_column, update_column; use columnNames array for batch delete_column)"},"columnNames":{"type":"array","description":"Array of column names to delete at once (for delete_column). Preferred over columnName when deleting multiple columns."},"data":{"type":"object","description":"Row data as key-value pairs (required for insert_row, update_row)"},"dependencies":{"type":"object","description":"Dependencies the workflow group requires before running a row. { columns?: string[] } lists input column names that must be filled; { workflowGroups?: string[] } lists other workflow group IDs whose outputs must complete first. Used by add_workflow_group and update_workflow_group.","properties":{"columns":{"type":"array","description":"Input column names that must be filled before the group runs.","items":{"type":"string"}},"workflowGroups":{"type":"array","description":"Other workflow group IDs whose outputs must complete first.","items":{"type":"string"}}}},"description":{"type":"string","description":"Table description (optional for 'create')"},"fileId":{"type":"string","description":"Canonical workspace file ID for create_from_file/import_file. Discover via read(\"files/{name}/meta.json\") or glob(\"files/by-id/*/meta.json\")."},"filePath":{"type":"string","description":"Legacy workspace file reference for create_from_file/import_file. 
Prefer fileId."},"filter":{"type":"object","description":"MongoDB-style filter for query_rows, update_rows_by_filter, delete_rows_by_filter"},"groupId":{"type":"string","description":"Workflow group ID (required for update_workflow_group, delete_workflow_group, run_workflow_group)."},"limit":{"type":"number","description":"Maximum rows to return or affect (optional, default 100)"},"mapping":{"type":"object","description":"Optional explicit CSV-header → table-column mapping for import_file, as { \"csvHeader\": \"columnName\" | null }. A string maps the CSV header to that table column; null skips that CSV header (it won't be imported); omit a header entirely to fall back to auto-mapping by sanitized name (case-insensitive).","additionalProperties":{"type":["string","null"],"description":"Target column name on the table. null skips that CSV header (it won't be imported); omit it entirely to fall back to auto-mapping."}},"mode":{"type":"string","description":"Mode flag. For import_file: 'append' (default) adds rows, 'replace' truncates existing rows in a transaction before inserting. For run_workflow_group: 'incomplete' (default) re-runs only rows that never produced output or last failed, 'all' re-runs every dep-satisfied row.","enum":["append","replace","all","incomplete"]},"name":{"type":"string","description":"Table name (required for 'create')"},"newName":{"type":"string","description":"New column name (required for rename_column)"},"newType":{"type":"string","description":"New column type (optional for update_column). Types: string, number, boolean, date, json"},"offset":{"type":"number","description":"Number of rows to skip (optional for query_rows, default 0)"},"outputFormat":{"type":"string","description":"Explicit format override for outputPath. Usually unnecessary — the file extension determines the format automatically. 
Only use this to force a different format than what the extension implies.","enum":["json","csv","txt","md","html"]},"outputPath":{"type":"string","description":"Pipe query_rows results directly to a NEW workspace file. The format is auto-inferred from the file extension: .csv → CSV, .json → JSON, .md → Markdown, etc. Use .csv for tabular exports. Use a flat path like \"files/export.csv\" — nested paths are not supported."},"outputs":{"type":"array","description":"Outputs to surface as columns. Each entry maps a workflow block output to a table column: { blockId, path, columnName?, columnType? }. blockId is the source block; path is the dotted output path; columnName auto-derives from the path when omitted; columnType defaults from the leaf type when omitted. Used by add_workflow_group and update_workflow_group. If unsure about valid (blockId, path) pairs, call list_workflow_outputs first — paths are validated against the live workflow and invalid picks return an error with the valid options. For Agent blocks with structured outputs, the structured fields appear as top-level paths (e.g. summary, industry); there is NO response.content path on a structured agent.","items":{"type":"object","properties":{"blockId":{"type":"string","description":"Source block ID inside the workflow."},"columnName":{"type":"string","description":"Optional target column name. Auto-derived from the path when omitted."},"columnType":{"type":"string","description":"Optional column type. Defaults from the leaf type when omitted.","enum":["string","number","boolean","date","json"]},"path":{"type":"string","description":"Dotted output path on the block."}},"required":["blockId","path"]}},"position":{"type":"integer","description":"Zero-based index at which to insert the row (optional, insert_row only). Rows at and below that index shift down. Omit to append at the end."},"positions":{"type":"array","description":"Per-row insertion indices for batch_insert_rows (optional). 
Must be the same length as rows and contain no duplicates. Values are final positions in the resulting table — lower-index shifts are applied automatically. Omit to append all rows at the end.","items":{"type":"integer"}},"rowId":{"type":"string","description":"Row ID (required for get_row, update_row, delete_row)"},"rowIds":{"type":"array","description":"Array of row IDs. Used by batch_delete_rows (rows to delete) and run_workflow_group (optional row scope: when omitted, runs across the whole table; when provided, only these rows are candidates and the eligibility predicate still applies — mid-run rows or rows with unmet deps are silently skipped).","items":{"type":"string"}},"rows":{"type":"array","description":"Array of row data objects (required for batch_insert_rows)"},"schema":{"type":"object","description":"Table schema with columns array (required for 'create'). Each column: { name, type, unique? }"},"scope":{"type":"string","description":"Cancellation scope for cancel_table_runs. 
'all' cancels in-flight runs across the whole table; 'row' cancels only the row identified by rowId.","enum":["all","row"]},"sort":{"type":"object","description":"Sort specification as { field: 'asc' | 'desc' } (optional for query_rows)"},"tableId":{"type":"string","description":"Table ID (required for most operations except 'create' and batch 'delete')"},"tableIds":{"type":"array","description":"Array of table IDs (for batch delete)","items":{"type":"string"}},"unique":{"type":"boolean","description":"Set column unique constraint (optional for update_column)"},"updates":{"type":"array","description":"Array of per-row updates: [{ rowId, data: { col: val } }] (for batch_update_rows)"},"values":{"type":"object","description":"Map of rowId to value for single-column batch update: { \"rowId1\": val1, \"rowId2\": val2 } (for batch_update_rows with columnName)"},"workflowId":{"type":"string","description":"ID of the workflow (required for add_workflow_group and list_workflow_outputs)."}}},"operation":{"type":"string","description":"The operation to perform","enum":["create","create_from_file","import_file","get","get_schema","delete","insert_row","batch_insert_rows","get_row","query_rows","update_row","delete_row","update_rows_by_filter","delete_rows_by_filter","batch_update_rows","batch_delete_rows","add_column","rename_column","delete_column","update_column","add_workflow_group","update_workflow_group","delete_workflow_group","run_workflow_group","cancel_table_runs","list_workflow_outputs"]}},"required":["operation","args"]},
793793
resultSchema: {"type":"object","properties":{"data":{"type":"object","description":"Operation-specific result payload."},"message":{"type":"string","description":"Human-readable outcome summary."},"success":{"type":"boolean","description":"Whether the operation succeeded."}},"required":["success","message"]},
794794
requiresConfirmation: true,
795795
};

apps/sim/lib/copilot/generated/tool-schemas-v1.ts

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2979,7 +2979,10 @@ export const TOOL_RUNTIME_SCHEMAS: Record<string, ToolRuntimeSchemaEntry> = {
29792979
},
29802980
"rowIds": {
29812981
"type": "array",
2982-
"description": "Array of row IDs to delete (for batch_delete_rows)"
2982+
"description": "Array of row IDs. Used by batch_delete_rows (rows to delete) and run_workflow_group (optional row scope: when omitted, runs across the whole table; when provided, only these rows are candidates and the eligibility predicate still applies — mid-run rows or rows with unmet deps are silently skipped).",
2983+
"items": {
2984+
"type": "string"
2985+
}
29832986
},
29842987
"rows": {
29852988
"type": "array",

0 commit comments

Comments (0)