From ade5345c9ddef30f36ed4454afa2d11401ccc91f Mon Sep 17 00:00:00 2001 From: Dan Lynch Date: Thu, 14 May 2026 17:23:06 +0000 Subject: [PATCH 1/4] =?UTF-8?q?refactor:=20rename=20node=20type=20taxonomy?= =?UTF-8?q?=20(Data*=20=E2=86=92=20Job*/Process*)=20+=20add=20ProcessExtra?= =?UTF-8?q?ction,=20ProcessImageVersions?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Renames: - DataJobTrigger → JobTrigger (category: job) - DataFileEmbedding → ProcessFileEmbedding (category: process) - DataChunks → ProcessChunks (category: process) - DataImageEmbedding → ProcessImageEmbedding (category: process) New nodes: - ProcessExtraction: file text extraction pipeline (output fields + MIME-conditional job trigger) - ProcessImageVersions: image variant generation pipeline (MIME-conditional job trigger) Also adds 'check', 'job', 'process' to codegen categoryOrder so all categories generate proper Params interfaces. Refs: constructive-planning#857, constructive-planning#848 --- .../src/blueprint-types.generated.ts | 419 +++++++++++++++--- .../src/codegen/generate-types.ts | 2 +- .../src/data/data-chunks.ts | 10 +- .../src/data/data-file-embedding.ts | 14 +- .../src/data/data-image-embedding.ts | 24 +- .../src/data/data-job-trigger.ts | 6 +- packages/node-type-registry/src/data/index.ts | 10 +- .../src/data/process-extraction.ts | 114 +++++ .../src/data/process-image-versions.ts | 146 ++++++ 9 files changed, 646 insertions(+), 99 deletions(-) create mode 100644 packages/node-type-registry/src/data/process-extraction.ts create mode 100644 packages/node-type-registry/src/data/process-image-versions.ts diff --git a/packages/node-type-registry/src/blueprint-types.generated.ts b/packages/node-type-registry/src/blueprint-types.generated.ts index c898202f6..53cb6baf2 100644 --- a/packages/node-type-registry/src/blueprint-types.generated.ts +++ b/packages/node-type-registry/src/blueprint-types.generated.ts @@ -35,12 +35,79 @@ export interface TriggerCondition { /** Negated condition. */ NOT?: TriggerCondition; } +/** + * =========================================================================== + * Check node type parameters + * =========================================================================== + */ +; +/** Adds a CHECK constraint that validates a column value is greater than a threshold (single-column: column > value) or that one column is greater than another (cross-column: columns[0] > columns[1]). Compiled via AST helpers. */ +export interface CheckGreaterThanParams { + /* Single column to compare against value (mutually exclusive with columns) */ + column?: string; + /* Threshold value for single-column comparison (column > value) */ + value?: number; + /* Two columns for cross-column comparison (columns[0] > columns[1]) */ + columns?: string[]; +} +/** Adds a CHECK constraint that validates a column value is less than a threshold (single-column: column < value) or that one column is less than another (cross-column: columns[0] < columns[1]). Compiled via AST helpers. */ +export interface CheckLessThanParams { + /* Single column to compare against value (mutually exclusive with columns) */ + column?: string; + /* Threshold value for single-column comparison (column < value) */ + value?: number; + /* Two columns for cross-column comparison (columns[0] < columns[1]) */ + columns?: string[]; +} +/** Adds a CHECK constraint that validates two columns are not equal (columns[0] != columns[1]). Useful for preventing self-referencing rows. Compiled via AST helpers. 
*/ +export interface CheckNotEqualParams { + /* Two columns that must not be equal */ + columns: string[]; +} +/** Adds a CHECK constraint that validates a column value is one of an allowed set (e.g. tier IN ('free', 'paid', 'custom')). Compiled to column = ANY(ARRAY[...]) via AST helpers. */ +export interface CheckOneOfParams { + /* Column to validate against the allowed values */ + column: string; + /* Array of allowed values for the column */ + values: string[]; +} /** * =========================================================================== * Data node type parameters * =========================================================================== */ ; +/** Declaratively attaches aggregate limit-tracking triggers to a table. On INSERT the named limit is incremented per entity; on DELETE it is decremented. Uses org_limit_aggregates_inc/dec for per-entity (org-level) aggregate limits rather than per-user limits. Requires a provisioned limits_module for the target database. */ +export interface DataAggregateLimitCounterParams { + /* Name of the aggregate limit to track (must match a default_limits entry, e.g. "databases", "members") */ + limit_name: string; + /* Column on the target table that holds the entity id for aggregate limit lookup */ + entity_field?: string; + /* Which DML events to attach triggers for */ + events?: ('INSERT' | 'DELETE' | 'UPDATE')[]; +} +/** Declaratively attaches billing usage-recording triggers to a table. On INSERT the named meter is incremented via record_usage; on DELETE it is decremented (reversal). On UPDATE, if the entity_field changes, the old entity is decremented and the new entity is incremented. Requires a provisioned billing_module for the target database. */ +export interface DataBillingMeterParams { + /* Slug of the billing meter to record usage against (must match a meters table entry, e.g. "databases", "seats") */ + meter_slug: string; + /* Column on the target table that holds the entity id for billing */ + entity_field?: string; + /* Units to record per event (default 1) */ + quantity?: number; + /* Which DML events to attach triggers for */ + events?: ('INSERT' | 'DELETE' | 'UPDATE')[]; +} +/** Enables bulk mutation smart tags on a table. When provisioned, adds @behavior tags for the selected bulk operations (insert, upsert, update, delete). Requires the graphile-bulk-mutations plugin. */ +export interface DataBulkParams { + /* Enable bulk insert (+bulkInsert) */ + insert?: boolean; + /* Enable bulk upsert (+bulkUpsert) */ + upsert?: boolean; + /* Enable bulk update (+bulkUpdate) */ + update?: boolean; + /* Enable bulk delete (+bulkDelete) */ + delete?: boolean; +} /** Creates a derived text field that automatically concatenates multiple source fields via BEFORE INSERT/UPDATE triggers. Used to produce a unified text representation (e.g., embedding_text) from multiple columns on a table. The trigger fires with '_000' prefix to run before Search* triggers alphabetically. */ export interface DataCompositeFieldParams { /* Name of the derived text field to create (default: 'embedding_text') */ @@ -87,25 +154,6 @@ export interface DataIdParams { /* Column name for the primary key */ field_name?: string; } -/** Composition wrapper that creates a vector embedding field with HNSW/IVFFlat index (via SearchVector) and a job trigger with compound conditions (via DataJobTrigger) that fires on INSERT for image files matching mime_type patterns. Designed for storage file tables. 
*/ -export interface DataImageEmbeddingParams { - /* Name of the vector embedding column */ - field_name?: string; - /* Vector dimensions */ - dimensions?: number; - /* Index type for similarity search */ - index_method?: 'hnsw' | 'ivfflat'; - /* Distance metric */ - metric?: 'cosine' | 'l2' | 'ip'; - /* Job task identifier for the embedding worker */ - task_identifier?: string; - /* MIME type LIKE patterns to match (e.g., image/%, video/%). Multiple patterns are OR'd together. */ - mime_patterns?: string[]; - /* Custom payload key-to-column mapping for the job trigger */ - payload_custom?: { - [key: string]: unknown; - }; -} /** BEFORE UPDATE trigger that prevents changes to a list of specified fields after INSERT. Raises an exception if any of the listed fields have changed. Unlike FieldImmutable (single-field), this handles multiple fields in a single trigger for efficiency. */ export interface DataImmutableFieldsParams { /* Field names that cannot be modified after INSERT (e.g. ["key", "bucket_id", "owner_id"]) */ @@ -129,43 +177,6 @@ export interface DataInheritFromParentParams { /* Parent table schema (optional, defaults to same schema as child table) */ parent_schema?: string; } -/** Dynamically creates PostgreSQL triggers that enqueue jobs via app_jobs.add_job() when table rows are inserted, updated, or deleted. Supports configurable payload strategies (full row, row ID, selected fields, or custom mapping), conditional firing via WHEN clauses, watched field changes, and extended job options (queue, priority, delay, max attempts). */ -export interface DataJobTriggerParams { - /* Job task identifier passed to add_job (e.g., process_invoice, sync_to_stripe) */ - task_identifier: string; - /* How to build the job payload: row (full NEW/OLD), row_id (just id), fields (selected columns), custom (mapped columns) */ - payload_strategy?: 'row' | 'row_id' | 'fields' | 'custom'; - /* Column names to include in payload (only for fields strategy) */ - payload_fields?: string[]; - /* Key-to-column mapping for custom payload (e.g., {"invoice_id": "id", "total": "amount"}) */ - payload_custom?: { - [key: string]: unknown; - }; - /* Trigger events to create */ - events?: ('INSERT' | 'UPDATE' | 'DELETE')[]; - /* Include OLD row in payload (for UPDATE triggers) */ - include_old?: boolean; - /* Include table/schema metadata in payload */ - include_meta?: boolean; - /* Column name for conditional WHEN clause (fires only when field equals condition_value) */ - condition_field?: string; - /* Value to compare against condition_field in WHEN clause */ - condition_value?: string; - /* Compound conditions for the trigger WHEN clause. Accepts a single leaf condition, an array of conditions (implicitly AND), or a nested combinator tree ({AND: [...], OR: [...], NOT: {...}}). Each leaf is {field, op, value?, row?, ref?}. Column types are resolved automatically from the table schema. Cannot be combined with condition_field or watch_fields. 
*/ - conditions?: TriggerCondition | TriggerCondition[]; - /* For UPDATE triggers, only fire when these fields change (uses DISTINCT FROM) */ - watch_fields?: string[]; - /* Static job key for upsert semantics (prevents duplicate jobs) */ - job_key?: string; - /* Job queue name for routing to specific workers */ - queue_name?: string; - /* Job priority (lower = higher priority) */ - priority?: number; - /* Delay before job runs as PostgreSQL interval (e.g., 30 seconds, 5 minutes) */ - run_at_delay?: string; - /* Maximum retry attempts for the job */ - max_attempts?: number; -} /** Declaratively attaches limit-tracking triggers to a table. On INSERT the named limit is incremented; on DELETE it is decremented. Requires a provisioned limits_module for the target scope. */ export interface DataLimitCounterParams { /* Name of the limit to track (must match a default_limits entry, e.g. "projects", "members") */ @@ -226,6 +237,13 @@ export interface DataPublishableParams { /* If true, also adds a UUID primary key column with auto-generation */ include_id?: boolean; } +/** Creates per-table subscriber tables in subscriptions_public with RLS policies derived from source table SELECT policies. Attaches statement-level triggers to emit changes to subscribers. */ +export interface DataRealtimeParams { + /* Which DML operations to track with emit_change triggers */ + operations?: ('INSERT' | 'UPDATE' | 'DELETE')[]; + /* Custom name for the subscriber table (defaults to {source_table}_subscriber) */ + subscriber_table_name?: string; +} /** Auto-generates URL-friendly slugs from field values on insert/update. Attaches BEFORE INSERT and BEFORE UPDATE triggers that call inflection.slugify() on the target field. References fields by name in data jsonb. */ export interface DataSlugParams { /* Name of the field to slugify */ @@ -407,7 +425,7 @@ export interface SearchUnifiedParams { /* Decay rate for recency boost (0-1, lower = faster decay) */boost_recency_decay?: number; }; } -/** Adds a vector embedding column with HNSW or IVFFlat index for similarity search. Supports configurable dimensions, distance metrics (cosine, l2, ip), stale tracking strategies (column, null, hash), and automatic job enqueue triggers for embedding generation. */ +/** Adds a vector embedding column with HNSW or IVFFlat index for similarity search. Supports configurable dimensions, distance metrics (cosine, l2, ip), per-field {field_name}_updated_at timestamp tracking (read-only in GraphQL), and automatic job enqueue triggers for embedding generation. */ export interface SearchVectorParams { /* Name of the vector column */ field_name?: string; @@ -421,16 +439,12 @@ export interface SearchVectorParams { index_options?: { [key: string]: unknown; }; - /* When stale_strategy is column, adds an embedding_stale boolean field */ - include_stale_field?: boolean; /* Column names that feed the embedding. Used by stale trigger to detect content changes. */ source_fields?: string[]; /* Auto-create trigger that enqueues embedding generation jobs */ enqueue_job?: boolean; /* Task identifier for the job queue */ job_task_name?: string; - /* Strategy for tracking embedding staleness. column: embedding_stale boolean. null: set embedding to NULL. hash: md5 hash of source fields. */ - stale_strategy?: 'column' | 'null' | 'hash'; /* Chunking configuration for long-text embedding. Creates an embedding_chunks record that drives automatic text splitting and per-chunk embedding. Omit to skip chunking. 
*/ chunks?: { /* Name of the text content column in the chunks table */content_field_name?: string; @@ -444,6 +458,221 @@ export interface SearchVectorParams { /* Task identifier for the chunking job queue */chunking_task_name?: string; }; } +/** + * =========================================================================== + * Job node type parameters + * =========================================================================== + */ +; +/** Dynamically creates PostgreSQL triggers that enqueue jobs via app_jobs.add_job() when table rows are inserted, updated, or deleted. Supports configurable payload strategies (full row, row ID, selected fields, or custom mapping), conditional firing via WHEN clauses, watched field changes, and extended job options (queue, priority, delay, max attempts). */ +export interface JobTriggerParams { + /* Job task identifier passed to add_job (e.g., process_invoice, sync_to_stripe) */ + task_identifier: string; + /* How to build the job payload: row (full NEW/OLD), row_id (just id), fields (selected columns), custom (mapped columns) */ + payload_strategy?: 'row' | 'row_id' | 'fields' | 'custom'; + /* Column names to include in payload (only for fields strategy) */ + payload_fields?: string[]; + /* Key-to-column mapping for custom payload (e.g., {"invoice_id": "id", "total": "amount"}) */ + payload_custom?: { + [key: string]: unknown; + }; + /* Trigger events to create */ + events?: ('INSERT' | 'UPDATE' | 'DELETE')[]; + /* Include OLD row in payload (for UPDATE triggers) */ + include_old?: boolean; + /* Include table/schema metadata in payload */ + include_meta?: boolean; + /* Column name for conditional WHEN clause (fires only when field equals condition_value) */ + condition_field?: string; + /* Value to compare against condition_field in WHEN clause */ + condition_value?: string; + /* Compound conditions for the trigger WHEN clause. Accepts a single leaf condition, an array of conditions (implicitly AND), or a nested combinator tree ({AND: [...], OR: [...], NOT: {...}}). Each leaf is {field, op, value?, row?, ref?}. Column types are resolved automatically from the table schema. Cannot be combined with condition_field or watch_fields. */ + conditions?: TriggerCondition | TriggerCondition[]; + /* For UPDATE triggers, only fire when these fields change (uses DISTINCT FROM) */ + watch_fields?: string[]; + /* Static job key for upsert semantics (prevents duplicate jobs) */ + job_key?: string; + /* Job queue name for routing to specific workers */ + queue_name?: string; + /* Job priority (lower = higher priority) */ + priority?: number; + /* Delay before job runs as PostgreSQL interval (e.g., 30 seconds, 5 minutes) */ + run_at_delay?: string; + /* Maximum retry attempts for the job */ + max_attempts?: number; +} +/** + * =========================================================================== + * Process node type parameters + * =========================================================================== + */ +; +/** Creates a chunked-embedding child table for any parent table. Provisions the chunks table with content, chunk_index, embedding vector, metadata, HNSW index, inherited RLS, and optional job trigger for automatic text splitting. Composed internally by ProcessFileEmbedding (enabled by default in extract mode) but can also be used standalone. 
*/ +export interface ProcessChunksParams { + /* Name of the text content column in the chunks table */ + content_field_name?: string; + /* Maximum number of characters per chunk */ + chunk_size?: number; + /* Number of overlapping characters between consecutive chunks */ + chunk_overlap?: number; + /* Strategy for splitting text into chunks */ + chunk_strategy?: 'fixed' | 'sentence' | 'paragraph' | 'semantic'; + /* Vector dimensions for per-chunk embeddings */ + dimensions?: number; + /* Distance metric for the HNSW index on chunk embeddings */ + metric?: 'cosine' | 'l2' | 'ip'; + /* Override the chunks table name. Defaults to {parent_table}_chunks. */ + chunks_table_name?: string; + /* Field names from the parent table to copy into chunk metadata */ + metadata_fields?: string[]; + /* Whether to create a job trigger that auto-enqueues chunking on parent INSERT/UPDATE */ + enqueue_chunking_job?: boolean; + /* Task identifier for the chunking job queue */ + chunking_task_name?: string; +} +/** Generic, MIME-scoped embedding node for file tables. Supports two modes: direct (whole-file to single vector, e.g. CLIP for images) when extraction is omitted, or extract (file to text to chunks to per-chunk vectors) when extraction config is provided. Composes SearchVector + JobTrigger + ProcessChunks (enabled by default in extract mode) internally. Multiple instances can coexist on the same table with different MIME scopes, field names, and embedding strategies. */ +export interface ProcessFileEmbeddingParams { + /* Name of the vector embedding column */ + field_name?: string; + /* Vector dimensions (e.g. 512 for CLIP, 768 for nomic, 1536 for ada-002) */ + dimensions?: number; + /* Index type for similarity search */ + index_method?: 'hnsw' | 'ivfflat'; + /* Distance metric */ + metric?: 'cosine' | 'l2' | 'ip'; + /* Index-specific options. HNSW: {m, ef_construction}. IVFFlat: {lists}. */ + index_options?: { + [key: string]: unknown; + }; + /* MIME type LIKE patterns to match. Multiple patterns are OR'd together. Examples: ['image/%'], ['application/pdf', 'text/%'], ['audio/%']. */ + mime_patterns?: string[]; + /* Job task identifier for the worker. In direct mode this is the embedding worker; in extract mode this is the extraction worker. */ + task_identifier?: string; + /* Trigger events that fire the job */ + events?: ('INSERT' | 'UPDATE')[]; + /* Custom payload key-to-column mapping for the job trigger */ + payload_custom?: { + [key: string]: unknown; + }; + /* Additional compound conditions beyond MIME filtering. Merged with the auto-generated MIME conditions via AND. Use this to add status checks, field guards, etc. */ + trigger_conditions?: TriggerCondition | TriggerCondition[]; + /* Text extraction configuration. When present, the generator creates extraction output fields on the table and configures SearchVector with source_fields + stale tracking. When absent, the node operates in direct mode (single vector per file, no text extraction). */ + extraction?: { + /* Field to store extracted text/markdown */text_field?: string; + /* JSONB field for extraction metadata (page count, language, etc.) */metadata_field?: string; + }; + /* Whether to create a chunks table via ProcessChunks. Defaults to true when extraction is provided, false in direct mode. Set explicitly to override. */ + include_chunks?: boolean; + /* Chunking configuration passed through to ProcessChunks. 
When include_chunks is true (or defaults to true in extract mode), these params configure the chunks table, embedding dimensions, strategy, etc. */ + chunks?: { + /* Name of the text content column in the chunks table */content_field_name?: string; + /* Maximum number of characters per chunk */chunk_size?: number; + /* Number of overlapping characters between consecutive chunks */chunk_overlap?: number; + /* Strategy for splitting text into chunks */chunk_strategy?: 'fixed' | 'sentence' | 'paragraph' | 'semantic'; + /* Field names from parent to copy into chunk metadata */metadata_fields?: string[]; + /* Whether to auto-enqueue a chunking job on insert/update */enqueue_chunking_job?: boolean; + /* Task identifier for the chunking job queue */chunking_task_name?: string; + }; +} +/** Image-specific preset of ProcessFileEmbedding. Delegates to ProcessFileEmbedding with image-oriented defaults: dimensions=512 (CLIP), mime_patterns=['image/%'], task_identifier='process_image_embedding', direct mode (no extraction). Accepts all ProcessFileEmbedding parameters — any overrides are forwarded through. */ +export interface ProcessImageEmbeddingParams { + /* Name of the vector embedding column */ + field_name?: string; + /* Vector dimensions (default 512 for CLIP-style image embeddings) */ + dimensions?: number; + /* Index type for similarity search */ + index_method?: 'hnsw' | 'ivfflat'; + /* Distance metric */ + metric?: 'cosine' | 'l2' | 'ip'; + /* Index-specific options. HNSW: {m, ef_construction}. IVFFlat: {lists}. */ + index_options?: { + [key: string]: unknown; + }; + /* MIME type LIKE patterns to match. Multiple patterns are OR'd together. */ + mime_patterns?: string[]; + /* Job task identifier for the image embedding worker */ + task_identifier?: string; + /* Trigger events that fire the job */ + events?: ('INSERT' | 'UPDATE')[]; + /* Custom payload key-to-column mapping for the job trigger */ + payload_custom?: { + [key: string]: unknown; + }; + /* Additional compound conditions beyond MIME filtering. Merged with the auto-generated MIME conditions via AND. */ + trigger_conditions?: TriggerCondition | TriggerCondition[]; + /* Text extraction configuration. Forwarded to ProcessFileEmbedding. When present, enables extract mode (e.g., OCR for images). */ + extraction?: { + /* Field to store extracted text */text_field?: string; + /* JSONB field for extraction metadata */metadata_field?: string; + }; + /* Chunking configuration. Forwarded to ProcessFileEmbedding. Only meaningful when extraction is also provided. */ + chunks?: { + content_field_name?: string; + chunk_size?: number; + chunk_overlap?: number; + chunk_strategy?: 'fixed' | 'sentence' | 'paragraph' | 'semantic'; + metadata_fields?: { + [key: string]: unknown; + }; + enqueue_chunking_job?: boolean; + chunking_task_name?: string; + }; +} +/** Creates extraction output fields and a job trigger for file text extraction. Fires when a file is uploaded (status = 'uploaded') or on INSERT. The external worker extracts text/metadata from the file (PDF, DOCX, HTML, etc.) and writes the result back to the configured output fields. Typically used upstream of ProcessFileEmbedding or ProcessChunks. */ +export interface ProcessExtractionParams { + /* Field to store extracted text/markdown */ + text_field?: string; + /* JSONB field for extraction metadata (page count, language, etc.) */ + metadata_field?: string; + /* MIME type LIKE patterns to match. Multiple patterns are OR'd together. 
Examples: ['application/pdf', 'text/%'], ['application/vnd.openxmlformats%']. */ + mime_patterns?: string[]; + /* Job task identifier for the extraction worker */ + task_identifier?: string; + /* Trigger events that fire the job */ + events?: ('INSERT' | 'UPDATE')[]; + /* Custom payload key-to-column mapping for the job trigger */ + payload_custom?: { + [key: string]: unknown; + }; + /* Additional compound conditions beyond MIME filtering. Merged with the auto-generated MIME conditions via AND. Use this to add status checks (e.g., status = 'uploaded'). */ + trigger_conditions?: TriggerCondition | TriggerCondition[]; + /* Job queue name for extraction tasks */ + queue_name?: string; + /* Maximum number of retry attempts */ + max_attempts?: number; + /* Job priority (lower = higher priority) */ + priority?: number; +} +/** Creates a job trigger for image variant generation. Fires when an image file is uploaded (status = 'uploaded') or on INSERT. The external worker generates resized, cropped, or reformatted versions (thumbnails, previews, WebP conversions, etc.) and stores them as new file records linked to the source image. */ +export interface ProcessImageVersionsParams { + /* Array of version definitions. Each version specifies dimensions, format, and quality for a generated image variant. */ + versions?: { + /* Version identifier (e.g., "thumb", "preview", "hero") */name: string; + /* Target width in pixels */width?: number; + /* Target height in pixels */height?: number; + /* Resize fitting strategy */fit?: 'cover' | 'contain' | 'fill' | 'inside' | 'outside'; + /* Output image format */format?: 'jpeg' | 'png' | 'webp' | 'avif'; + /* Output quality (1-100) */quality?: number; + }[]; + /* MIME type LIKE patterns to match. Defaults to all image types. */ + mime_patterns?: string[]; + /* Job task identifier for the image processing worker */ + task_identifier?: string; + /* Trigger events that fire the job */ + events?: ('INSERT' | 'UPDATE')[]; + /* Custom payload key-to-column mapping for the job trigger */ + payload_custom?: { + [key: string]: unknown; + }; + /* Additional compound conditions beyond MIME filtering. Merged with the auto-generated MIME conditions via AND. */ + trigger_conditions?: TriggerCondition | TriggerCondition[]; + /* Job queue name for image processing tasks */ + queue_name?: string; + /* Maximum number of retry attempts */ + max_attempts?: number; + /* Job priority (lower = higher priority) */ + priority?: number; +} /** * =========================================================================== * Authz node type parameters @@ -475,6 +704,23 @@ export interface AuthzCompositeParams { } /** Denies all access. Generates FALSE expression. */ export type AuthzDenyAllParams = {}; +/** Path-scoped file sharing via ltree containment. Grants access when a path_shares row matches the current user, bucket, and an ancestor path with the required permission. */ +export interface AuthzFilePathParams { + /* Schema of the path_shares table */ + shares_schema: string; + /* Name of the path_shares table */ + shares_table: string; + /* Schema of the files table (used to qualify column references inside the EXISTS subquery) */ + files_schema?: string; + /* Name of the files table (used to qualify column references inside the EXISTS subquery) */ + files_table: string; + /* Boolean column on the path_shares table that grants the required permission (e.g. 
can_read, can_write) */ + permission_field: string; + /* Column on the files table referencing the bucket */ + bucket_field?: string; + /* Ltree column on the files table representing the file path */ + path_field?: string; +} /** Direct equality comparison between a table column and the current user ID. Simplest authorization pattern with no subqueries. */ export interface AuthzDirectOwnerParams { /* Column name containing the owner user ID (e.g., owner_id) */ @@ -825,7 +1071,7 @@ export interface BlueprintField { /** An RLS policy entry for a blueprint table. Uses $type to match the blueprint JSON convention. */ export interface BlueprintPolicy { /** Authz* policy type name (e.g., "AuthzDirectOwner", "AuthzAllowAll"). */ - $type: 'AuthzAllowAll' | 'AuthzAppMembership' | 'AuthzComposite' | 'AuthzDenyAll' | 'AuthzDirectOwner' | 'AuthzDirectOwnerAny' | 'AuthzEntityMembership' | 'AuthzMemberList' | 'AuthzNotReadOnly' | 'AuthzOrgHierarchy' | 'AuthzPeerOwnership' | 'AuthzPublishable' | 'AuthzRelatedEntityMembership' | 'AuthzRelatedMemberList' | 'AuthzRelatedPeerOwnership' | 'AuthzTemporal'; + $type: 'AuthzAllowAll' | 'AuthzAppMembership' | 'AuthzComposite' | 'AuthzDenyAll' | 'AuthzFilePath' | 'AuthzDirectOwner' | 'AuthzDirectOwnerAny' | 'AuthzEntityMembership' | 'AuthzMemberList' | 'AuthzNotReadOnly' | 'AuthzOrgHierarchy' | 'AuthzPeerOwnership' | 'AuthzPublishable' | 'AuthzRelatedEntityMembership' | 'AuthzRelatedMemberList' | 'AuthzRelatedPeerOwnership' | 'AuthzTemporal'; /** Privileges this policy applies to (e.g., ["select"], ["insert", "update", "delete"]). */ privileges?: string[]; /** Whether this policy is permissive (true) or restrictive (false). Defaults to true. */ @@ -1009,7 +1255,7 @@ export interface BlueprintEntityType { */ ; /** String shorthand -- just the node type name. 
*/ -export type BlueprintNodeShorthand = 'AuthzAllowAll' | 'AuthzAppMembership' | 'AuthzComposite' | 'AuthzDenyAll' | 'AuthzDirectOwner' | 'AuthzDirectOwnerAny' | 'AuthzEntityMembership' | 'AuthzMemberList' | 'AuthzNotReadOnly' | 'AuthzOrgHierarchy' | 'AuthzPeerOwnership' | 'AuthzPublishable' | 'AuthzRelatedEntityMembership' | 'AuthzRelatedMemberList' | 'AuthzRelatedPeerOwnership' | 'AuthzTemporal' | 'DataCompositeField' | 'DataDirectOwner' | 'DataEntityMembership' | 'DataFeatureFlag' | 'DataForceCurrentUser' | 'DataId' | 'DataImageEmbedding' | 'DataImmutableFields' | 'DataInflection' | 'DataInheritFromParent' | 'DataJobTrigger' | 'DataLimitCounter' | 'DataJsonb' | 'DataOwnedFields' | 'DataOwnershipInEntity' | 'DataPeoplestamps' | 'DataPublishable' | 'DataSlug' | 'DataSoftDelete' | 'DataStatusField' | 'DataTags' | 'DataTimestamps' | 'SearchBm25' | 'SearchFullText' | 'SearchSpatial' | 'SearchSpatialAggregate' | 'SearchTrgm' | 'SearchUnified' | 'SearchVector' | 'TableOrganizationSettings' | 'TableUserProfiles' | 'TableUserSettings'; +export type BlueprintNodeShorthand = 'AuthzAllowAll' | 'AuthzAppMembership' | 'AuthzComposite' | 'AuthzDenyAll' | 'AuthzFilePath' | 'AuthzDirectOwner' | 'AuthzDirectOwnerAny' | 'AuthzEntityMembership' | 'AuthzMemberList' | 'AuthzNotReadOnly' | 'AuthzOrgHierarchy' | 'AuthzPeerOwnership' | 'AuthzPublishable' | 'AuthzRelatedEntityMembership' | 'AuthzRelatedMemberList' | 'AuthzRelatedPeerOwnership' | 'AuthzTemporal' | 'CheckGreaterThan' | 'CheckLessThan' | 'CheckNotEqual' | 'CheckOneOf' | 'DataAggregateLimitCounter' | 'DataBillingMeter' | 'DataBulk' | 'ProcessChunks' | 'DataCompositeField' | 'DataDirectOwner' | 'DataEntityMembership' | 'ProcessFileEmbedding' | 'DataFeatureFlag' | 'DataForceCurrentUser' | 'DataId' | 'ProcessImageEmbedding' | 'DataImmutableFields' | 'DataInflection' | 'DataInheritFromParent' | 'JobTrigger' | 'DataLimitCounter' | 'DataJsonb' | 'DataOwnedFields' | 'ProcessExtraction' | 'ProcessImageVersions' | 'DataOwnershipInEntity' | 'DataPeoplestamps' | 'DataPublishable' | 'DataRealtime' | 'DataSlug' | 'DataSoftDelete' | 'DataStatusField' | 'DataTags' | 'DataTimestamps' | 'SearchBm25' | 'SearchFullText' | 'SearchSpatial' | 'SearchSpatialAggregate' | 'SearchTrgm' | 'SearchUnified' | 'SearchVector' | 'TableOrganizationSettings' | 'TableUserProfiles' | 'TableUserSettings'; /** Object form -- { $type, data } with typed parameters. 
*/ export type BlueprintNodeObject = { $type: 'AuthzAllowAll'; @@ -1023,6 +1269,9 @@ export type BlueprintNodeObject = { } | { $type: 'AuthzDenyAll'; data?: Record; +} | { + $type: 'AuthzFilePath'; + data: AuthzFilePathParams; } | { $type: 'AuthzDirectOwner'; data: AuthzDirectOwnerParams; @@ -1059,6 +1308,30 @@ export type BlueprintNodeObject = { } | { $type: 'AuthzTemporal'; data: AuthzTemporalParams; +} | { + $type: 'CheckGreaterThan'; + data: CheckGreaterThanParams; +} | { + $type: 'CheckLessThan'; + data: CheckLessThanParams; +} | { + $type: 'CheckNotEqual'; + data: CheckNotEqualParams; +} | { + $type: 'CheckOneOf'; + data: CheckOneOfParams; +} | { + $type: 'DataAggregateLimitCounter'; + data: DataAggregateLimitCounterParams; +} | { + $type: 'DataBillingMeter'; + data: DataBillingMeterParams; +} | { + $type: 'DataBulk'; + data: DataBulkParams; +} | { + $type: 'ProcessChunks'; + data: ProcessChunksParams; } | { $type: 'DataCompositeField'; data: DataCompositeFieldParams; @@ -1068,6 +1341,9 @@ export type BlueprintNodeObject = { } | { $type: 'DataEntityMembership'; data: DataEntityMembershipParams; +} | { + $type: 'ProcessFileEmbedding'; + data: ProcessFileEmbeddingParams; } | { $type: 'DataFeatureFlag'; data: DataFeatureFlagParams; @@ -1078,8 +1354,8 @@ export type BlueprintNodeObject = { $type: 'DataId'; data: DataIdParams; } | { - $type: 'DataImageEmbedding'; - data: DataImageEmbeddingParams; + $type: 'ProcessImageEmbedding'; + data: ProcessImageEmbeddingParams; } | { $type: 'DataImmutableFields'; data: DataImmutableFieldsParams; @@ -1090,8 +1366,8 @@ export type BlueprintNodeObject = { $type: 'DataInheritFromParent'; data: DataInheritFromParentParams; } | { - $type: 'DataJobTrigger'; - data: DataJobTriggerParams; + $type: 'JobTrigger'; + data: JobTriggerParams; } | { $type: 'DataLimitCounter'; data: DataLimitCounterParams; @@ -1101,6 +1377,12 @@ export type BlueprintNodeObject = { } | { $type: 'DataOwnedFields'; data: DataOwnedFieldsParams; +} | { + $type: 'ProcessExtraction'; + data: ProcessExtractionParams; +} | { + $type: 'ProcessImageVersions'; + data: ProcessImageVersionsParams; } | { $type: 'DataOwnershipInEntity'; data: DataOwnershipInEntityParams; @@ -1110,6 +1392,9 @@ export type BlueprintNodeObject = { } | { $type: 'DataPublishable'; data: DataPublishableParams; +} | { + $type: 'DataRealtime'; + data: DataRealtimeParams; } | { $type: 'DataSlug'; data: DataSlugParams; diff --git a/packages/node-type-registry/src/codegen/generate-types.ts b/packages/node-type-registry/src/codegen/generate-types.ts index e8bd04df4..9e4d3281c 100644 --- a/packages/node-type-registry/src/codegen/generate-types.ts +++ b/packages/node-type-registry/src/codegen/generate-types.ts @@ -1131,7 +1131,7 @@ function buildProgram(meta?: MetaTableInfo[]): string { statements.push(buildTriggerConditionInterface()); // -- Parameter interfaces grouped by category -- - const categoryOrder = ['data', 'search', 'authz', 'relation', 'view']; + const categoryOrder = ['check', 'data', 'search', 'job', 'process', 'authz', 'relation', 'view']; for (const cat of categoryOrder) { const nts = categories.get(cat); if (!nts || nts.length === 0) continue; diff --git a/packages/node-type-registry/src/data/data-chunks.ts b/packages/node-type-registry/src/data/data-chunks.ts index 9e89d56e1..6305a1a88 100644 --- a/packages/node-type-registry/src/data/data-chunks.ts +++ b/packages/node-type-registry/src/data/data-chunks.ts @@ -12,20 +12,20 @@ import type { NodeTypeDefinition } from '../types'; * - RLS policies inherited from 
parent * - Optional job trigger for automatic chunking on INSERT/UPDATE * - * This node is also composed internally by DataFileEmbedding (enabled by + * This node is also composed internally by ProcessFileEmbedding (enabled by * default in extract mode). Use it standalone when you want a chunks table * without the full file-embedding pipeline. */ -export const DataChunks: NodeTypeDefinition = { - name: 'DataChunks', +export const ProcessChunks: NodeTypeDefinition = { + name: 'ProcessChunks', slug: 'data_chunks', - category: 'data', + category: 'process', display_name: 'Chunks', description: 'Creates a chunked-embedding child table for any parent table. ' + 'Provisions the chunks table with content, chunk_index, embedding vector, ' + 'metadata, HNSW index, inherited RLS, and optional job trigger for ' + - 'automatic text splitting. Composed internally by DataFileEmbedding ' + + 'automatic text splitting. Composed internally by ProcessFileEmbedding ' + '(enabled by default in extract mode) but can also be used standalone.', parameter_schema: { type: 'object', diff --git a/packages/node-type-registry/src/data/data-file-embedding.ts b/packages/node-type-registry/src/data/data-file-embedding.ts index e57deaf4e..2f91d9f22 100644 --- a/packages/node-type-registry/src/data/data-file-embedding.ts +++ b/packages/node-type-registry/src/data/data-file-embedding.ts @@ -1,16 +1,16 @@ import type { NodeTypeDefinition } from '../types'; -export const DataFileEmbedding: NodeTypeDefinition = { - name: 'DataFileEmbedding', +export const ProcessFileEmbedding: NodeTypeDefinition = { + name: 'ProcessFileEmbedding', slug: 'data_file_embedding', - category: 'data', + category: 'process', display_name: 'File Embedding', description: 'Generic, MIME-scoped embedding node for file tables. Supports two modes: ' + 'direct (whole-file to single vector, e.g. CLIP for images) when extraction ' + 'is omitted, or extract (file to text to chunks to per-chunk vectors) when ' + - 'extraction config is provided. Composes SearchVector + DataJobTrigger + ' + - 'DataChunks (enabled by default in extract mode) internally. Multiple ' + + 'extraction config is provided. Composes SearchVector + JobTrigger + ' + + 'ProcessChunks (enabled by default in extract mode) internally. Multiple ' + 'instances can coexist on the same table with different MIME scopes, field ' + 'names, and embedding strategies.', parameter_schema: { @@ -122,14 +122,14 @@ export const DataFileEmbedding: NodeTypeDefinition = { include_chunks: { type: 'boolean', description: - 'Whether to create a chunks table via DataChunks. Defaults to true ' + + 'Whether to create a chunks table via ProcessChunks. Defaults to true ' + 'when extraction is provided, false in direct mode. Set explicitly ' + 'to override.', }, chunks: { type: 'object', description: - 'Chunking configuration passed through to DataChunks. When ' + + 'Chunking configuration passed through to ProcessChunks. When ' + 'include_chunks is true (or defaults to true in extract mode), these ' + 'params configure the chunks table, embedding dimensions, strategy, etc.', properties: { diff --git a/packages/node-type-registry/src/data/data-image-embedding.ts b/packages/node-type-registry/src/data/data-image-embedding.ts index f9905850b..e562d74f9 100644 --- a/packages/node-type-registry/src/data/data-image-embedding.ts +++ b/packages/node-type-registry/src/data/data-image-embedding.ts @@ -1,32 +1,32 @@ import type { NodeTypeDefinition } from '../types'; /** - * Image-specific preset of DataFileEmbedding. 
+ * Image-specific preset of ProcessFileEmbedding. * * At the SQL layer, data_image_embedding delegates entirely to * data_file_embedding, merging image-specific defaults before forwarding. - * The parameter schema here is intentionally identical to DataFileEmbedding; + * The parameter schema here is intentionally identical to ProcessFileEmbedding; * only the defaults differ (dimensions: 512, task: process_image_embedding, * mime_patterns: ['image/%']). * * Kept as a separate node type for backward compatibility — existing - * blueprints that reference DataImageEmbedding continue to work unchanged. + * blueprints that reference ProcessImageEmbedding continue to work unchanged. */ -export const DataImageEmbedding: NodeTypeDefinition = { - name: 'DataImageEmbedding', +export const ProcessImageEmbedding: NodeTypeDefinition = { + name: 'ProcessImageEmbedding', slug: 'data_image_embedding', - category: 'data', + category: 'process', display_name: 'Image Embedding', description: - 'Image-specific preset of DataFileEmbedding. Delegates to DataFileEmbedding ' + + 'Image-specific preset of ProcessFileEmbedding. Delegates to ProcessFileEmbedding ' + 'with image-oriented defaults: dimensions=512 (CLIP), mime_patterns=[\'image/%\'], ' + 'task_identifier=\'process_image_embedding\', direct mode (no extraction). ' + - 'Accepts all DataFileEmbedding parameters — any overrides are forwarded through.', + 'Accepts all ProcessFileEmbedding parameters — any overrides are forwarded through.', parameter_schema: { type: 'object', properties: { - // ── Vector config (passed through to DataFileEmbedding) ────────── + // ── Vector config (passed through to ProcessFileEmbedding) ────────── field_name: { type: 'string', format: 'column-ref', @@ -103,7 +103,7 @@ export const DataImageEmbedding: NodeTypeDefinition = { extraction: { type: 'object', description: - 'Text extraction configuration. Forwarded to DataFileEmbedding. ' + + 'Text extraction configuration. Forwarded to ProcessFileEmbedding. ' + 'When present, enables extract mode (e.g., OCR for images).', properties: { text_field: { @@ -121,11 +121,11 @@ export const DataImageEmbedding: NodeTypeDefinition = { } }, - // ── Chunking config (optional — forwarded to DataFileEmbedding) ─ + // ── Chunking config (optional — forwarded to ProcessFileEmbedding) ─ chunks: { type: 'object', description: - 'Chunking configuration. Forwarded to DataFileEmbedding. ' + + 'Chunking configuration. Forwarded to ProcessFileEmbedding. ' + 'Only meaningful when extraction is also provided.', properties: { content_field_name: { diff --git a/packages/node-type-registry/src/data/data-job-trigger.ts b/packages/node-type-registry/src/data/data-job-trigger.ts index 490b7d640..b7ceb1554 100644 --- a/packages/node-type-registry/src/data/data-job-trigger.ts +++ b/packages/node-type-registry/src/data/data-job-trigger.ts @@ -26,10 +26,10 @@ const triggerConditionSchema = { } }; -export const DataJobTrigger: NodeTypeDefinition = { - name: 'DataJobTrigger', +export const JobTrigger: NodeTypeDefinition = { + name: 'JobTrigger', slug: 'data_job_trigger', - category: 'data', + category: 'job', display_name: 'Job Trigger', description: 'Dynamically creates PostgreSQL triggers that enqueue jobs via app_jobs.add_job() when table rows are inserted, updated, or deleted. 
Supports configurable payload strategies (full row, row ID, selected fields, or custom mapping), conditional firing via WHEN clauses, watched field changes, and extended job options (queue, priority, delay, max attempts).', parameter_schema: { diff --git a/packages/node-type-registry/src/data/index.ts b/packages/node-type-registry/src/data/index.ts index 9edbe8103..b543ef4fd 100644 --- a/packages/node-type-registry/src/data/index.ts +++ b/packages/node-type-registry/src/data/index.ts @@ -5,22 +5,24 @@ export { CheckOneOf } from './check-one-of'; export { DataAggregateLimitCounter } from './data-aggregate-limit-counter'; export { DataBillingMeter } from './data-billing-meter'; export { DataBulk } from './data-bulk'; -export { DataChunks } from './data-chunks'; +export { ProcessChunks } from './data-chunks'; export { DataCompositeField } from './data-composite-field'; export { DataDirectOwner } from './data-direct-owner'; export { DataEntityMembership } from './data-entity-membership'; -export { DataFileEmbedding } from './data-file-embedding'; +export { ProcessFileEmbedding } from './data-file-embedding'; export { DataFeatureFlag } from './data-feature-flag'; export { DataForceCurrentUser } from './data-force-current-user'; export { DataId } from './data-id'; -export { DataImageEmbedding } from './data-image-embedding'; +export { ProcessImageEmbedding } from './data-image-embedding'; export { DataImmutableFields } from './data-immutable-fields'; export { DataInflection } from './data-inflection'; export { DataInheritFromParent } from './data-inherit-from-parent'; -export { DataJobTrigger } from './data-job-trigger'; +export { JobTrigger } from './data-job-trigger'; export { DataLimitCounter } from './data-limit-counter'; export { DataJsonb } from './data-jsonb'; export { DataOwnedFields } from './data-owned-fields'; +export { ProcessExtraction } from './process-extraction'; +export { ProcessImageVersions } from './process-image-versions'; export { DataOwnershipInEntity } from './data-ownership-in-entity'; export { DataPeoplestamps } from './data-peoplestamps'; export { DataPublishable } from './data-publishable'; diff --git a/packages/node-type-registry/src/data/process-extraction.ts b/packages/node-type-registry/src/data/process-extraction.ts new file mode 100644 index 000000000..2b73276fd --- /dev/null +++ b/packages/node-type-registry/src/data/process-extraction.ts @@ -0,0 +1,114 @@ +import type { NodeTypeDefinition } from '../types'; + +/** + * File extraction processing node. + * + * Composes a JobTrigger that fires when a file transitions to status = 'uploaded' + * (or on INSERT if confirm_upload is not enabled). The trigger enqueues a + * text-extraction job that converts the file contents (PDF, DOCX, HTML, etc.) + * into plain text or markdown, storing the result in configurable output fields. + * + * The extraction worker is external (Knative function) — this node only creates + * the trigger infrastructure and output fields. The worker calls back into the + * database to write extracted text and metadata. + */ +export const ProcessExtraction: NodeTypeDefinition = { + name: 'ProcessExtraction', + slug: 'process_extraction', + category: 'process', + display_name: 'File Extraction', + description: + 'Creates extraction output fields and a job trigger for file text extraction. ' + + 'Fires when a file is uploaded (status = \'uploaded\') or on INSERT. ' + + 'The external worker extracts text/metadata from the file (PDF, DOCX, HTML, etc.) 
' + + 'and writes the result back to the configured output fields. ' + + 'Typically used upstream of ProcessFileEmbedding or ProcessChunks.', + parameter_schema: { + type: 'object', + properties: { + + // ── Output fields ───────────────────────────────────────────── + text_field: { + type: 'string', + format: 'column-ref', + description: 'Field to store extracted text/markdown', + default: 'extracted_text' + }, + metadata_field: { + type: 'string', + format: 'column-ref', + description: 'JSONB field for extraction metadata (page count, language, etc.)', + default: 'extracted_metadata' + }, + + // ── MIME scoping ────────────────────────────────────────────── + mime_patterns: { + type: 'array', + items: { type: 'string' }, + description: + 'MIME type LIKE patterns to match. Multiple patterns are OR\'d together. ' + + 'Examples: [\'application/pdf\', \'text/%\'], [\'application/vnd.openxmlformats%\'].', + default: ['application/pdf', 'text/%'] + }, + + // ── Job routing ─────────────────────────────────────────────── + task_identifier: { + type: 'string', + description: 'Job task identifier for the extraction worker', + default: 'extract_file_text' + }, + events: { + type: 'array', + items: { type: 'string', enum: ['INSERT', 'UPDATE'] }, + description: 'Trigger events that fire the job', + default: ['INSERT'] + }, + payload_custom: { + type: 'object', + additionalProperties: { type: 'string', format: 'column-ref' }, + description: 'Custom payload key-to-column mapping for the job trigger', + default: { + file_id: 'id', + key: 'key', + mime_type: 'mime_type', + bucket_id: 'bucket_id' + } + }, + trigger_conditions: { + description: + 'Additional compound conditions beyond MIME filtering. ' + + 'Merged with the auto-generated MIME conditions via AND. ' + + 'Use this to add status checks (e.g., status = \'uploaded\').', + 'x-codegen-type': 'TriggerCondition | TriggerCondition[]', + oneOf: [ + { $ref: '#/$defs/triggerCondition' }, + { type: 'array', items: { $ref: '#/$defs/triggerCondition' } } + ] + }, + + // ── Job options ─────────────────────────────────────────────── + queue_name: { + type: 'string', + description: 'Job queue name for extraction tasks', + default: 'extraction' + }, + max_attempts: { + type: 'integer', + description: 'Maximum number of retry attempts', + default: 5 + }, + priority: { + type: 'integer', + description: 'Job priority (lower = higher priority)', + default: 0 + } + } + }, + tags: [ + 'extraction', + 'files', + 'processing', + 'jobs', + 'text' + ] +}; diff --git a/packages/node-type-registry/src/data/process-image-versions.ts b/packages/node-type-registry/src/data/process-image-versions.ts new file mode 100644 index 000000000..5cec5699b --- /dev/null +++ b/packages/node-type-registry/src/data/process-image-versions.ts @@ -0,0 +1,146 @@ +import type { NodeTypeDefinition } from '../types'; + +/** + * Image version processing node. + * + * Composes a JobTrigger that fires when an image file transitions to + * status = 'uploaded' (or on INSERT if confirm_upload is not enabled). + * The trigger enqueues an image-processing job that generates resized, + * cropped, or reformatted variants of the source image. + * + * The image processing worker is external (Knative function) — this node + * only creates the trigger infrastructure. The worker generates the variants + * and writes them back to the storage system as new file records linked to + * the source file. 
+ */ +export const ProcessImageVersions: NodeTypeDefinition = { + name: 'ProcessImageVersions', + slug: 'process_image_versions', + category: 'process', + display_name: 'Image Versions', + description: + 'Creates a job trigger for image variant generation. ' + + 'Fires when an image file is uploaded (status = \'uploaded\') or on INSERT. ' + + 'The external worker generates resized, cropped, or reformatted versions ' + + '(thumbnails, previews, WebP conversions, etc.) and stores them as new ' + + 'file records linked to the source image.', + parameter_schema: { + type: 'object', + properties: { + + // ── Version definitions ─────────────────────────────────────── + versions: { + type: 'array', + items: { + type: 'object', + properties: { + name: { + type: 'string', + description: 'Version identifier (e.g., "thumb", "preview", "hero")' + }, + width: { + type: 'integer', + description: 'Target width in pixels' + }, + height: { + type: 'integer', + description: 'Target height in pixels' + }, + fit: { + type: 'string', + enum: ['cover', 'contain', 'fill', 'inside', 'outside'], + description: 'Resize fitting strategy', + default: 'cover' + }, + format: { + type: 'string', + enum: ['jpeg', 'png', 'webp', 'avif'], + description: 'Output image format', + default: 'webp' + }, + quality: { + type: 'integer', + description: 'Output quality (1-100)', + default: 80 + } + }, + required: ['name'] + }, + description: + 'Array of version definitions. Each version specifies dimensions, ' + + 'format, and quality for a generated image variant.', + default: [ + { name: 'thumb', width: 150, height: 150, fit: 'cover', format: 'webp', quality: 80 }, + { name: 'preview', width: 800, height: 600, fit: 'inside', format: 'webp', quality: 85 } + ] + }, + + // ── MIME scoping ────────────────────────────────────────────── + mime_patterns: { + type: 'array', + items: { type: 'string' }, + description: 'MIME type LIKE patterns to match. Defaults to all image types.', + default: ['image/%'] + }, + + // ── Job routing ─────────────────────────────────────────────── + task_identifier: { + type: 'string', + description: 'Job task identifier for the image processing worker', + default: 'process_image_versions' + }, + events: { + type: 'array', + items: { type: 'string', enum: ['INSERT', 'UPDATE'] }, + description: 'Trigger events that fire the job', + default: ['INSERT'] + }, + payload_custom: { + type: 'object', + additionalProperties: { type: 'string', format: 'column-ref' }, + description: 'Custom payload key-to-column mapping for the job trigger', + default: { + file_id: 'id', + key: 'key', + mime_type: 'mime_type', + bucket_id: 'bucket_id' + } + }, + trigger_conditions: { + description: + 'Additional compound conditions beyond MIME filtering. 
' + + 'Merged with the auto-generated MIME conditions via AND.', + 'x-codegen-type': 'TriggerCondition | TriggerCondition[]', + oneOf: [ + { $ref: '#/$defs/triggerCondition' }, + { type: 'array', items: { $ref: '#/$defs/triggerCondition' } } + ] + }, + + // ── Job options ─────────────────────────────────────────────── + queue_name: { + type: 'string', + description: 'Job queue name for image processing tasks', + default: 'image_processing' + }, + max_attempts: { + type: 'integer', + description: 'Maximum number of retry attempts', + default: 5 + }, + priority: { + type: 'integer', + description: 'Job priority (lower = higher priority)', + default: 0 + } + } + }, + tags: [ + 'images', + 'processing', + 'jobs', + 'resize', + 'thumbnails', + 'files' + ] +}; From 3d5d0282a686d20457f18d635ce7e8745e5c9dad Mon Sep 17 00:00:00 2001 From: Dan Lynch Date: Thu, 14 May 2026 20:06:30 +0000 Subject: [PATCH 2/4] =?UTF-8?q?refactor:=20rename=20Limit*/Billing*=20node?= =?UTF-8?q?=20types=20(DataLimitCounter=20=E2=86=92=20LimitCounter,=20Data?= =?UTF-8?q?AggregateLimitCounter=20=E2=86=92=20LimitAggregate,=20DataFeatu?= =?UTF-8?q?reFlag=20=E2=86=92=20LimitFeatureFlag,=20DataBillingMeter=20?= =?UTF-8?q?=E2=86=92=20BillingMeter)=20(#858)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../src/blueprint-types.generated.ts | 110 ++++++++++-------- .../src/codegen/generate-types.ts | 2 +- .../src/data/data-aggregate-limit-counter.ts | 6 +- .../src/data/data-billing-meter.ts | 6 +- .../src/data/data-feature-flag.ts | 6 +- .../src/data/data-limit-counter.ts | 6 +- packages/node-type-registry/src/data/index.ts | 8 +- 7 files changed, 78 insertions(+), 66 deletions(-) diff --git a/packages/node-type-registry/src/blueprint-types.generated.ts b/packages/node-type-registry/src/blueprint-types.generated.ts index 53cb6baf2..e60b1b280 100644 --- a/packages/node-type-registry/src/blueprint-types.generated.ts +++ b/packages/node-type-registry/src/blueprint-types.generated.ts @@ -35,6 +35,23 @@ export interface TriggerCondition { /** Negated condition. */ NOT?: TriggerCondition; } +/** + * =========================================================================== + * Billing node type parameters + * =========================================================================== + */ +; +/** Declaratively attaches billing usage-recording triggers to a table. On INSERT the named meter is incremented via record_usage; on DELETE it is decremented (reversal). On UPDATE, if the entity_field changes, the old entity is decremented and the new entity is incremented. Requires a provisioned billing_module for the target database. */ +export interface BillingMeterParams { + /* Slug of the billing meter to record usage against (must match a meters table entry, e.g. "databases", "seats") */ + meter_slug: string; + /* Column on the target table that holds the entity id for billing */ + entity_field?: string; + /* Units to record per event (default 1) */ + quantity?: number; + /* Which DML events to attach triggers for */ + events?: ('INSERT' | 'DELETE' | 'UPDATE')[]; +} /** * =========================================================================== * Check node type parameters @@ -77,26 +94,6 @@ export interface CheckOneOfParams { * =========================================================================== */ ; -/** Declaratively attaches aggregate limit-tracking triggers to a table. On INSERT the named limit is incremented per entity; on DELETE it is decremented. 
Uses org_limit_aggregates_inc/dec for per-entity (org-level) aggregate limits rather than per-user limits. Requires a provisioned limits_module for the target database. */ -export interface DataAggregateLimitCounterParams { - /* Name of the aggregate limit to track (must match a default_limits entry, e.g. "databases", "members") */ - limit_name: string; - /* Column on the target table that holds the entity id for aggregate limit lookup */ - entity_field?: string; - /* Which DML events to attach triggers for */ - events?: ('INSERT' | 'DELETE' | 'UPDATE')[]; -} -/** Declaratively attaches billing usage-recording triggers to a table. On INSERT the named meter is incremented via record_usage; on DELETE it is decremented (reversal). On UPDATE, if the entity_field changes, the old entity is decremented and the new entity is incremented. Requires a provisioned billing_module for the target database. */ -export interface DataBillingMeterParams { - /* Slug of the billing meter to record usage against (must match a meters table entry, e.g. "databases", "seats") */ - meter_slug: string; - /* Column on the target table that holds the entity id for billing */ - entity_field?: string; - /* Units to record per event (default 1) */ - quantity?: number; - /* Which DML events to attach triggers for */ - events?: ('INSERT' | 'DELETE' | 'UPDATE')[]; -} /** Enables bulk mutation smart tags on a table. When provisioned, adds @behavior tags for the selected bulk operations (insert, upsert, update, delete). Requires the graphile-bulk-mutations plugin. */ export interface DataBulkParams { /* Enable bulk insert (+bulkInsert) */ @@ -135,15 +132,6 @@ export interface DataEntityMembershipParams { /* If true, adds a foreign key constraint from entity_id to the users table */ include_user_fk?: boolean; } -/** Gates a table behind a feature flag backed by the cap tables. Attaches a BEFORE INSERT trigger that checks whether the named feature cap value is > 0. Features are modeled as caps with max=0 (disabled) or max=1 (enabled) in limit_caps / limit_caps_defaults tables. Resolution: COALESCE(per-entity cap, scope default, 0). */ -export interface DataFeatureFlagParams { - /* Cap name representing this feature (must match a limit_caps_defaults entry with max=0 or max=1) */ - feature_name: string; - /* Feature scope: "app" (membership_type=1, app-level caps) or "org" (membership_type=2, per-entity caps) */ - scope?: 'app' | 'org'; - /* Column on the target table that holds the entity id for per-entity cap lookups (only used for org scope) */ - entity_field?: string; -} /** BEFORE INSERT trigger that forces a field to the value of jwt_public.current_user_id(). Prevents clients from spoofing the actor/uploader identity. The field value is always overwritten regardless of what the client provides. */ export interface DataForceCurrentUserParams { /* Name of the field to force to current_user_id() */ @@ -177,17 +165,6 @@ export interface DataInheritFromParentParams { /* Parent table schema (optional, defaults to same schema as child table) */ parent_schema?: string; } -/** Declaratively attaches limit-tracking triggers to a table. On INSERT the named limit is incremented; on DELETE it is decremented. Requires a provisioned limits_module for the target scope. */ -export interface DataLimitCounterParams { - /* Name of the limit to track (must match a default_limits entry, e.g. 
"projects", "members") */ - limit_name: string; - /* Limit scope: "app" (membership_type=1, user-level) or "org" (membership_type=2, entity-level) */ - scope?: 'app' | 'org'; - /* Column on the target table that holds the actor or entity id used for limit lookup */ - actor_field?: string; - /* Which DML events to attach triggers for */ - events?: ('INSERT' | 'DELETE' | 'UPDATE')[]; -} /** Adds a JSONB column with optional GIN index for containment queries (@>, ?, ?|, ?&). Standard pattern for semi-structured metadata. */ export interface DataJsonbParams { /* Column name for the JSONB field */ @@ -297,6 +274,41 @@ export type TableOrganizationSettingsParams = {}; export type TableUserProfilesParams = {}; /** Creates a user settings table for user-specific configuration. Uses AuthzDirectOwner for access control. */ export type TableUserSettingsParams = {}; +/** + * =========================================================================== + * Limit node type parameters + * =========================================================================== + */ +; +/** Declaratively attaches aggregate limit-tracking triggers to a table. On INSERT the named limit is incremented per entity; on DELETE it is decremented. Uses org_limit_aggregates_inc/dec for per-entity (org-level) aggregate limits rather than per-user limits. Requires a provisioned limits_module for the target database. */ +export interface LimitAggregateParams { + /* Name of the aggregate limit to track (must match a default_limits entry, e.g. "databases", "members") */ + limit_name: string; + /* Column on the target table that holds the entity id for aggregate limit lookup */ + entity_field?: string; + /* Which DML events to attach triggers for */ + events?: ('INSERT' | 'DELETE' | 'UPDATE')[]; +} +/** Gates a table behind a feature flag backed by the cap tables. Attaches a BEFORE INSERT trigger that checks whether the named feature cap value is > 0. Features are modeled as caps with max=0 (disabled) or max=1 (enabled) in limit_caps / limit_caps_defaults tables. Resolution: COALESCE(per-entity cap, scope default, 0). */ +export interface LimitFeatureFlagParams { + /* Cap name representing this feature (must match a limit_caps_defaults entry with max=0 or max=1) */ + feature_name: string; + /* Feature scope: "app" (membership_type=1, app-level caps) or "org" (membership_type=2, per-entity caps) */ + scope?: 'app' | 'org'; + /* Column on the target table that holds the entity id for per-entity cap lookups (only used for org scope) */ + entity_field?: string; +} +/** Declaratively attaches limit-tracking triggers to a table. On INSERT the named limit is incremented; on DELETE it is decremented. Requires a provisioned limits_module for the target scope. */ +export interface LimitCounterParams { + /* Name of the limit to track (must match a default_limits entry, e.g. "projects", "members") */ + limit_name: string; + /* Limit scope: "app" (membership_type=1, user-level) or "org" (membership_type=2, entity-level) */ + scope?: 'app' | 'org'; + /* Column on the target table that holds the actor or entity id used for limit lookup */ + actor_field?: string; + /* Which DML events to attach triggers for */ + events?: ('INSERT' | 'DELETE' | 'UPDATE')[]; +} /** * =========================================================================== * Search node type parameters @@ -1255,7 +1267,7 @@ export interface BlueprintEntityType { */ ; /** String shorthand -- just the node type name. 
*/ -export type BlueprintNodeShorthand = 'AuthzAllowAll' | 'AuthzAppMembership' | 'AuthzComposite' | 'AuthzDenyAll' | 'AuthzFilePath' | 'AuthzDirectOwner' | 'AuthzDirectOwnerAny' | 'AuthzEntityMembership' | 'AuthzMemberList' | 'AuthzNotReadOnly' | 'AuthzOrgHierarchy' | 'AuthzPeerOwnership' | 'AuthzPublishable' | 'AuthzRelatedEntityMembership' | 'AuthzRelatedMemberList' | 'AuthzRelatedPeerOwnership' | 'AuthzTemporal' | 'CheckGreaterThan' | 'CheckLessThan' | 'CheckNotEqual' | 'CheckOneOf' | 'DataAggregateLimitCounter' | 'DataBillingMeter' | 'DataBulk' | 'ProcessChunks' | 'DataCompositeField' | 'DataDirectOwner' | 'DataEntityMembership' | 'ProcessFileEmbedding' | 'DataFeatureFlag' | 'DataForceCurrentUser' | 'DataId' | 'ProcessImageEmbedding' | 'DataImmutableFields' | 'DataInflection' | 'DataInheritFromParent' | 'JobTrigger' | 'DataLimitCounter' | 'DataJsonb' | 'DataOwnedFields' | 'ProcessExtraction' | 'ProcessImageVersions' | 'DataOwnershipInEntity' | 'DataPeoplestamps' | 'DataPublishable' | 'DataRealtime' | 'DataSlug' | 'DataSoftDelete' | 'DataStatusField' | 'DataTags' | 'DataTimestamps' | 'SearchBm25' | 'SearchFullText' | 'SearchSpatial' | 'SearchSpatialAggregate' | 'SearchTrgm' | 'SearchUnified' | 'SearchVector' | 'TableOrganizationSettings' | 'TableUserProfiles' | 'TableUserSettings'; +export type BlueprintNodeShorthand = 'AuthzAllowAll' | 'AuthzAppMembership' | 'AuthzComposite' | 'AuthzDenyAll' | 'AuthzFilePath' | 'AuthzDirectOwner' | 'AuthzDirectOwnerAny' | 'AuthzEntityMembership' | 'AuthzMemberList' | 'AuthzNotReadOnly' | 'AuthzOrgHierarchy' | 'AuthzPeerOwnership' | 'AuthzPublishable' | 'AuthzRelatedEntityMembership' | 'AuthzRelatedMemberList' | 'AuthzRelatedPeerOwnership' | 'AuthzTemporal' | 'CheckGreaterThan' | 'CheckLessThan' | 'CheckNotEqual' | 'CheckOneOf' | 'LimitAggregate' | 'BillingMeter' | 'DataBulk' | 'ProcessChunks' | 'DataCompositeField' | 'DataDirectOwner' | 'DataEntityMembership' | 'ProcessFileEmbedding' | 'LimitFeatureFlag' | 'DataForceCurrentUser' | 'DataId' | 'ProcessImageEmbedding' | 'DataImmutableFields' | 'DataInflection' | 'DataInheritFromParent' | 'JobTrigger' | 'LimitCounter' | 'DataJsonb' | 'DataOwnedFields' | 'ProcessExtraction' | 'ProcessImageVersions' | 'DataOwnershipInEntity' | 'DataPeoplestamps' | 'DataPublishable' | 'DataRealtime' | 'DataSlug' | 'DataSoftDelete' | 'DataStatusField' | 'DataTags' | 'DataTimestamps' | 'SearchBm25' | 'SearchFullText' | 'SearchSpatial' | 'SearchSpatialAggregate' | 'SearchTrgm' | 'SearchUnified' | 'SearchVector' | 'TableOrganizationSettings' | 'TableUserProfiles' | 'TableUserSettings'; /** Object form -- { $type, data } with typed parameters. 
*/ export type BlueprintNodeObject = { $type: 'AuthzAllowAll'; @@ -1321,11 +1333,11 @@ export type BlueprintNodeObject = { $type: 'CheckOneOf'; data: CheckOneOfParams; } | { - $type: 'DataAggregateLimitCounter'; - data: DataAggregateLimitCounterParams; + $type: 'LimitAggregate'; + data: LimitAggregateParams; } | { - $type: 'DataBillingMeter'; - data: DataBillingMeterParams; + $type: 'BillingMeter'; + data: BillingMeterParams; } | { $type: 'DataBulk'; data: DataBulkParams; @@ -1345,8 +1357,8 @@ export type BlueprintNodeObject = { $type: 'ProcessFileEmbedding'; data: ProcessFileEmbeddingParams; } | { - $type: 'DataFeatureFlag'; - data: DataFeatureFlagParams; + $type: 'LimitFeatureFlag'; + data: LimitFeatureFlagParams; } | { $type: 'DataForceCurrentUser'; data: DataForceCurrentUserParams; @@ -1369,8 +1381,8 @@ export type BlueprintNodeObject = { $type: 'JobTrigger'; data: JobTriggerParams; } | { - $type: 'DataLimitCounter'; - data: DataLimitCounterParams; + $type: 'LimitCounter'; + data: LimitCounterParams; } | { $type: 'DataJsonb'; data: DataJsonbParams; diff --git a/packages/node-type-registry/src/codegen/generate-types.ts b/packages/node-type-registry/src/codegen/generate-types.ts index 9e4d3281c..2779f07fa 100644 --- a/packages/node-type-registry/src/codegen/generate-types.ts +++ b/packages/node-type-registry/src/codegen/generate-types.ts @@ -1131,7 +1131,7 @@ function buildProgram(meta?: MetaTableInfo[]): string { statements.push(buildTriggerConditionInterface()); // -- Parameter interfaces grouped by category -- - const categoryOrder = ['check', 'data', 'search', 'job', 'process', 'authz', 'relation', 'view']; + const categoryOrder = ['billing', 'check', 'data', 'limit', 'search', 'job', 'process', 'authz', 'relation', 'view']; for (const cat of categoryOrder) { const nts = categories.get(cat); if (!nts || nts.length === 0) continue; diff --git a/packages/node-type-registry/src/data/data-aggregate-limit-counter.ts b/packages/node-type-registry/src/data/data-aggregate-limit-counter.ts index 0b9ba25ab..2a0822732 100644 --- a/packages/node-type-registry/src/data/data-aggregate-limit-counter.ts +++ b/packages/node-type-registry/src/data/data-aggregate-limit-counter.ts @@ -1,9 +1,9 @@ import type { NodeTypeDefinition } from '../types'; -export const DataAggregateLimitCounter: NodeTypeDefinition = { - name: 'DataAggregateLimitCounter', +export const LimitAggregate: NodeTypeDefinition = { + name: 'LimitAggregate', slug: 'data_aggregate_limit_counter', - category: 'data', + category: 'limit', display_name: 'Aggregate Limit Counter', description: 'Declaratively attaches aggregate limit-tracking triggers to a table. On INSERT the named limit is incremented per entity; on DELETE it is decremented. Uses org_limit_aggregates_inc/dec for per-entity (org-level) aggregate limits rather than per-user limits. 
Requires a provisioned limits_module for the target database.', diff --git a/packages/node-type-registry/src/data/data-billing-meter.ts b/packages/node-type-registry/src/data/data-billing-meter.ts index e03e45642..5e8b979f9 100644 --- a/packages/node-type-registry/src/data/data-billing-meter.ts +++ b/packages/node-type-registry/src/data/data-billing-meter.ts @@ -1,9 +1,9 @@ import type { NodeTypeDefinition } from '../types'; -export const DataBillingMeter: NodeTypeDefinition = { - name: 'DataBillingMeter', +export const BillingMeter: NodeTypeDefinition = { + name: 'BillingMeter', slug: 'data_billing_meter', - category: 'data', + category: 'billing', display_name: 'Billing Meter', description: 'Declaratively attaches billing usage-recording triggers to a table. On INSERT the named meter is incremented via record_usage; on DELETE it is decremented (reversal). On UPDATE, if the entity_field changes, the old entity is decremented and the new entity is incremented. Requires a provisioned billing_module for the target database.', diff --git a/packages/node-type-registry/src/data/data-feature-flag.ts b/packages/node-type-registry/src/data/data-feature-flag.ts index bd31de956..e96517382 100644 --- a/packages/node-type-registry/src/data/data-feature-flag.ts +++ b/packages/node-type-registry/src/data/data-feature-flag.ts @@ -1,9 +1,9 @@ import type { NodeTypeDefinition } from '../types'; -export const DataFeatureFlag: NodeTypeDefinition = { - name: 'DataFeatureFlag', +export const LimitFeatureFlag: NodeTypeDefinition = { + name: 'LimitFeatureFlag', slug: 'data_feature_flag', - category: 'data', + category: 'limit', display_name: 'Feature Flag', description: 'Gates a table behind a feature flag backed by the cap tables. Attaches a BEFORE INSERT trigger that checks whether the named feature cap value is > 0. Features are modeled as caps with max=0 (disabled) or max=1 (enabled) in limit_caps / limit_caps_defaults tables. Resolution: COALESCE(per-entity cap, scope default, 0).', diff --git a/packages/node-type-registry/src/data/data-limit-counter.ts b/packages/node-type-registry/src/data/data-limit-counter.ts index 9ee48a058..4d7d9ea76 100644 --- a/packages/node-type-registry/src/data/data-limit-counter.ts +++ b/packages/node-type-registry/src/data/data-limit-counter.ts @@ -1,9 +1,9 @@ import type { NodeTypeDefinition } from '../types'; -export const DataLimitCounter: NodeTypeDefinition = { - name: 'DataLimitCounter', +export const LimitCounter: NodeTypeDefinition = { + name: 'LimitCounter', slug: 'data_limit_counter', - category: 'data', + category: 'limit', display_name: 'Limit Counter', description: 'Declaratively attaches limit-tracking triggers to a table. On INSERT the named limit is incremented; on DELETE it is decremented. 
Requires a provisioned limits_module for the target scope.', diff --git a/packages/node-type-registry/src/data/index.ts b/packages/node-type-registry/src/data/index.ts index b543ef4fd..6cdeab7b8 100644 --- a/packages/node-type-registry/src/data/index.ts +++ b/packages/node-type-registry/src/data/index.ts @@ -2,15 +2,15 @@ export { CheckGreaterThan } from './check-greater-than'; export { CheckLessThan } from './check-less-than'; export { CheckNotEqual } from './check-not-equal'; export { CheckOneOf } from './check-one-of'; -export { DataAggregateLimitCounter } from './data-aggregate-limit-counter'; -export { DataBillingMeter } from './data-billing-meter'; +export { LimitAggregate } from './data-aggregate-limit-counter'; +export { BillingMeter } from './data-billing-meter'; export { DataBulk } from './data-bulk'; export { ProcessChunks } from './data-chunks'; export { DataCompositeField } from './data-composite-field'; export { DataDirectOwner } from './data-direct-owner'; export { DataEntityMembership } from './data-entity-membership'; export { ProcessFileEmbedding } from './data-file-embedding'; -export { DataFeatureFlag } from './data-feature-flag'; +export { LimitFeatureFlag } from './data-feature-flag'; export { DataForceCurrentUser } from './data-force-current-user'; export { DataId } from './data-id'; export { ProcessImageEmbedding } from './data-image-embedding'; @@ -18,7 +18,7 @@ export { DataImmutableFields } from './data-immutable-fields'; export { DataInflection } from './data-inflection'; export { DataInheritFromParent } from './data-inherit-from-parent'; export { JobTrigger } from './data-job-trigger'; -export { DataLimitCounter } from './data-limit-counter'; +export { LimitCounter } from './data-limit-counter'; export { DataJsonb } from './data-jsonb'; export { DataOwnedFields } from './data-owned-fields'; export { ProcessExtraction } from './process-extraction'; From c12c2f888f9b796cdef98d4a3435c50b8b9a12fa Mon Sep 17 00:00:00 2001 From: Dan Lynch Date: Thu, 14 May 2026 21:07:52 +0000 Subject: [PATCH 3/4] refactor: make ProcessImageVersions versions required, remove hardcoded defaults - versions is now required (minItems: 1) with no default array - Blueprint must explicitly specify what image variants to generate - Prevents opinionated defaults (thumb 150x150, preview 800x600) from silently applying --- .../src/data/process-image-versions.ts | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/packages/node-type-registry/src/data/process-image-versions.ts b/packages/node-type-registry/src/data/process-image-versions.ts index 5cec5699b..7ab9b1426 100644 --- a/packages/node-type-registry/src/data/process-image-versions.ts +++ b/packages/node-type-registry/src/data/process-image-versions.ts @@ -26,6 +26,7 @@ export const ProcessImageVersions: NodeTypeDefinition = { 'file records linked to the source image.', parameter_schema: { type: 'object', + required: ['versions'], properties: { // ── Version definitions ─────────────────────────────────────── @@ -68,11 +69,9 @@ export const ProcessImageVersions: NodeTypeDefinition = { }, description: 'Array of version definitions. Each version specifies dimensions, ' + - 'format, and quality for a generated image variant.', - default: [ - { name: 'thumb', width: 150, height: 150, fit: 'cover', format: 'webp', quality: 80 }, - { name: 'preview', width: 800, height: 600, fit: 'inside', format: 'webp', quality: 85 } - ] + 'format, and quality for a generated image variant. 
' + + 'Required — the blueprint must explicitly define what variants to generate.', + minItems: 1 }, // ── MIME scoping ────────────────────────────────────────────── From 8f9ef3c57b26a7b48f65e87eebcc850975b03734 Mon Sep 17 00:00:00 2001 From: Dan Lynch Date: Thu, 14 May 2026 21:08:10 +0000 Subject: [PATCH 4/4] chore: regenerate blueprint types (versions now required on ProcessImageVersions) --- packages/node-type-registry/src/blueprint-types.generated.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/node-type-registry/src/blueprint-types.generated.ts b/packages/node-type-registry/src/blueprint-types.generated.ts index e60b1b280..f717a414c 100644 --- a/packages/node-type-registry/src/blueprint-types.generated.ts +++ b/packages/node-type-registry/src/blueprint-types.generated.ts @@ -657,8 +657,8 @@ export interface ProcessExtractionParams { } /** Creates a job trigger for image variant generation. Fires when an image file is uploaded (status = 'uploaded') or on INSERT. The external worker generates resized, cropped, or reformatted versions (thumbnails, previews, WebP conversions, etc.) and stores them as new file records linked to the source image. */ export interface ProcessImageVersionsParams { - /* Array of version definitions. Each version specifies dimensions, format, and quality for a generated image variant. */ - versions?: { + /* Array of version definitions. Each version specifies dimensions, format, and quality for a generated image variant. Required — the blueprint must explicitly define what variants to generate. */ + versions: { /* Version identifier (e.g., "thumb", "preview", "hero") */name: string; /* Target width in pixels */width?: number; /* Target height in pixels */height?: number;
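Note (illustrative, not part of the patch series): a minimal sketch of how a blueprint might reference the renamed node types and the now-required versions parameter after these changes. The relative import path and the table context are assumptions; the $type names and parameter fields are taken from the generated types above, and the two variant entries mirror the defaults removed in PATCH 3/4, now spelled out explicitly.

// Hypothetical usage sketch -- import path is an assumption, not part of this patch.
import type { BlueprintNodeObject } from './blueprint-types.generated';

const nodes: BlueprintNodeObject[] = [
  // DataLimitCounter -> LimitCounter (PATCH 2/4): track per-org project count.
  { $type: 'LimitCounter', data: { limit_name: 'projects', scope: 'org', actor_field: 'org_id' } },
  // DataBillingMeter -> BillingMeter (PATCH 2/4): record one usage unit per seat row.
  { $type: 'BillingMeter', data: { meter_slug: 'seats', entity_field: 'org_id', quantity: 1 } },
  // ProcessImageVersions (PATCH 3/4): versions is now required, so the former
  // implicit thumb/preview defaults must be declared explicitly if still wanted.
  {
    $type: 'ProcessImageVersions',
    data: {
      versions: [
        { name: 'thumb', width: 150, height: 150, fit: 'cover', format: 'webp', quality: 80 },
        { name: 'preview', width: 800, height: 600, fit: 'inside', format: 'webp', quality: 85 }
      ]
    }
  }
];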